Merge tag 'wireless-drivers-next-for-davem-2019-04-18' of git://git.kernel.org/pub...
author    David S. Miller <davem@davemloft.net>
          Thu, 18 Apr 2019 18:07:55 +0000 (11:07 -0700)
committer David S. Miller <davem@davemloft.net>
          Thu, 18 Apr 2019 18:07:55 +0000 (11:07 -0700)
Kalle Valo says:

====================
wireless-drivers-next patches for 5.2

Nothing really special standing out this time, iwlwifi being the most
active driver.

Major changes:

iwlwifi

* send NO_DATA events so they can be captured in radiotap

* support for multiple BSSID

* support for some new FW API versions

* support new hardware

* debugfs cleanups by Greg-KH

qtnfmac

* allow each MAC to specify its own regulatory rules
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
1388 files changed:
.clang-format
.mailmap
Documentation/accounting/psi.txt
Documentation/bpf/btf.rst
Documentation/devicetree/bindings/arm/cpus.yaml
Documentation/devicetree/bindings/hwmon/adc128d818.txt
Documentation/devicetree/bindings/i2c/i2c-iop3xx.txt [moved from Documentation/devicetree/bindings/i2c/i2c-xscale.txt with 100% similarity]
Documentation/devicetree/bindings/i2c/i2c-mt65xx.txt [moved from Documentation/devicetree/bindings/i2c/i2c-mtk.txt with 100% similarity]
Documentation/devicetree/bindings/i2c/i2c-stu300.txt [moved from Documentation/devicetree/bindings/i2c/i2c-st-ddci2c.txt with 100% similarity]
Documentation/devicetree/bindings/i2c/i2c-sun6i-p2wi.txt [moved from Documentation/devicetree/bindings/i2c/i2c-sunxi-p2wi.txt with 100% similarity]
Documentation/devicetree/bindings/i2c/i2c-wmt.txt [moved from Documentation/devicetree/bindings/i2c/i2c-vt8500.txt with 100% similarity]
Documentation/devicetree/bindings/net/mdio-mux-meson-g12a.txt [new file with mode: 0644]
Documentation/devicetree/bindings/serial/mtk-uart.txt
Documentation/filesystems/mount_api.txt
Documentation/i2c/busses/i2c-i801
Documentation/lzo.txt
Documentation/networking/bpf_flow_dissector.rst [new file with mode: 0644]
Documentation/networking/devlink-info-versions.rst
Documentation/networking/dsa/bcm_sf2.rst [moved from Documentation/networking/dsa/bcm_sf2.txt with 83% similarity]
Documentation/networking/dsa/dsa.rst [moved from Documentation/networking/dsa/dsa.txt with 67% similarity]
Documentation/networking/dsa/index.rst [new file with mode: 0644]
Documentation/networking/dsa/lan9303.rst [moved from Documentation/networking/dsa/lan9303.txt with 85% similarity]
Documentation/networking/index.rst
Documentation/networking/rxrpc.txt
Documentation/virtual/kvm/api.txt
Documentation/virtual/kvm/mmu.txt
MAINTAINERS
Makefile
arch/alpha/include/asm/Kbuild
arch/alpha/include/uapi/asm/kvm_para.h [deleted file]
arch/arc/include/asm/Kbuild
arch/arc/include/asm/syscall.h
arch/arc/include/uapi/asm/Kbuild
arch/arm/Kconfig
arch/arm/boot/dts/am335x-evm.dts
arch/arm/boot/dts/am335x-evmsk.dts
arch/arm/boot/dts/am33xx-l4.dtsi
arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts
arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi
arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
arch/arm/boot/dts/imx6ull-pinfunc-snvs.h
arch/arm/boot/dts/rk3288-tinker.dtsi
arch/arm/boot/dts/rk3288-veyron.dtsi
arch/arm/boot/dts/rk3288.dtsi
arch/arm/boot/dts/sama5d2-pinfunc.h
arch/arm/configs/imx_v4_v5_defconfig
arch/arm/configs/imx_v6_v7_defconfig
arch/arm/include/asm/kvm_mmu.h
arch/arm/include/asm/stage2_pgtable.h
arch/arm/include/asm/syscall.h
arch/arm/include/uapi/asm/Kbuild
arch/arm/include/uapi/asm/kvm_para.h [deleted file]
arch/arm/mach-at91/pm.c
arch/arm/mach-imx/cpuidle-imx6q.c
arch/arm/mach-imx/mach-imx51.c
arch/arm/mach-iop13xx/setup.c
arch/arm/mach-iop13xx/tpmi.c
arch/arm/mach-milbeaut/platsmp.c
arch/arm/mach-omap1/board-ams-delta.c
arch/arm/mach-omap2/display.c
arch/arm/plat-iop/adma.c
arch/arm/plat-orion/common.c
arch/arm64/Kconfig.platforms
arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
arch/arm64/boot/dts/nvidia/tegra186.dtsi
arch/arm64/boot/dts/renesas/r8a774c0.dtsi
arch/arm64/boot/dts/renesas/r8a77990.dtsi
arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts
arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
arch/arm64/boot/dts/rockchip/rk3328.dtsi
arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dts
arch/arm64/include/asm/futex.h
arch/arm64/include/asm/kvm_mmu.h
arch/arm64/include/asm/module.h
arch/arm64/include/asm/syscall.h
arch/arm64/kernel/ftrace.c
arch/arm64/kernel/sdei.c
arch/arm64/kernel/setup.c
arch/arm64/kernel/traps.c
arch/arm64/kvm/reset.c
arch/c6x/include/asm/Kbuild
arch/c6x/include/asm/syscall.h
arch/c6x/include/uapi/asm/Kbuild
arch/csky/include/asm/syscall.h
arch/h8300/include/asm/Kbuild
arch/h8300/include/asm/syscall.h
arch/h8300/include/uapi/asm/Kbuild
arch/hexagon/include/asm/Kbuild
arch/hexagon/include/asm/syscall.h
arch/hexagon/include/uapi/asm/kvm_para.h [deleted file]
arch/ia64/include/asm/Kbuild
arch/ia64/include/asm/syscall.h
arch/ia64/include/uapi/asm/Kbuild
arch/ia64/kernel/ptrace.c
arch/m68k/include/asm/Kbuild
arch/m68k/include/uapi/asm/Kbuild
arch/microblaze/include/asm/Kbuild
arch/microblaze/include/asm/syscall.h
arch/microblaze/include/uapi/asm/Kbuild
arch/mips/configs/generic/board-ocelot.config
arch/mips/include/asm/syscall.h
arch/mips/kernel/kgdb.c
arch/mips/kernel/ptrace.c
arch/mips/sgi-ip27/ip27-irq.c
arch/nds32/include/asm/syscall.h
arch/nios2/include/asm/Kbuild
arch/nios2/include/asm/syscall.h
arch/nios2/include/uapi/asm/Kbuild
arch/openrisc/include/asm/Kbuild
arch/openrisc/include/asm/syscall.h
arch/openrisc/include/uapi/asm/Kbuild
arch/parisc/include/asm/Kbuild
arch/parisc/include/asm/ptrace.h
arch/parisc/include/asm/syscall.h
arch/parisc/include/uapi/asm/Kbuild
arch/parisc/kernel/process.c
arch/parisc/kernel/setup.c
arch/powerpc/include/asm/mmu.h
arch/powerpc/include/asm/syscall.h
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/head_32.S
arch/powerpc/kernel/kvm.c
arch/powerpc/kernel/vdso32/gettimeofday.S
arch/powerpc/lib/memcmp_64.S
arch/powerpc/platforms/pseries/pseries_energy.c
arch/powerpc/platforms/pseries/ras.c
arch/riscv/configs/rv32_defconfig [new file with mode: 0644]
arch/riscv/include/asm/fixmap.h
arch/riscv/include/asm/syscall.h
arch/riscv/include/asm/uaccess.h
arch/riscv/kernel/Makefile
arch/riscv/kernel/module.c
arch/riscv/kernel/setup.c
arch/riscv/mm/Makefile
arch/riscv/mm/init.c
arch/s390/include/asm/ap.h
arch/s390/include/asm/elf.h
arch/s390/include/asm/lowcore.h
arch/s390/include/asm/syscall.h
arch/s390/kernel/perf_cpum_cf_diag.c
arch/s390/kernel/smp.c
arch/s390/kernel/vtime.c
arch/sh/boards/of-generic.c
arch/sh/include/asm/Kbuild
arch/sh/include/asm/syscall_32.h
arch/sh/include/asm/syscall_64.h
arch/sh/include/uapi/asm/Kbuild
arch/sparc/include/asm/Kbuild
arch/sparc/include/asm/syscall.h
arch/sparc/include/uapi/asm/kvm_para.h [deleted file]
arch/sparc/kernel/pci_sun4v.c
arch/um/include/asm/syscall-generic.h
arch/unicore32/include/asm/Kbuild
arch/unicore32/include/uapi/asm/Kbuild
arch/x86/Kconfig
arch/x86/Makefile
arch/x86/boot/compressed/misc.h
arch/x86/events/amd/core.c
arch/x86/events/core.c
arch/x86/events/intel/core.c
arch/x86/include/asm/bitops.h
arch/x86/include/asm/cpufeature.h
arch/x86/include/asm/kvm_emulate.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/realmode.h
arch/x86/include/asm/syscall.h
arch/x86/include/asm/xen/hypercall.h
arch/x86/include/uapi/asm/vmx.h
arch/x86/kernel/cpu/resctrl/monitor.c
arch/x86/kernel/cpu/resctrl/rdtgroup.c
arch/x86/kvm/emulate.c
arch/x86/kvm/hyperv.c
arch/x86/kvm/lapic.c
arch/x86/kvm/mmu.c
arch/x86/kvm/mmu.h
arch/x86/kvm/mmutrace.h
arch/x86/kvm/pmu.c
arch/x86/kvm/svm.c
arch/x86/kvm/trace.h
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/vmx/vmx.h
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h
arch/x86/mm/mmap.c
arch/x86/platform/efi/quirks.c
arch/x86/realmode/init.c
arch/xtensa/include/asm/Kbuild
arch/xtensa/include/asm/processor.h
arch/xtensa/include/asm/syscall.h
arch/xtensa/include/uapi/asm/Kbuild
arch/xtensa/kernel/entry.S
arch/xtensa/kernel/stacktrace.c
arch/xtensa/mm/mmu.c
block/bfq-iosched.c
block/bfq-iosched.h
block/bfq-wf2q.c
block/bio.c
block/blk-core.c
block/blk-flush.c
block/blk-mq-sched.c
block/blk-mq.c
block/blk-mq.h
drivers/acpi/acpica/evgpe.c
drivers/acpi/acpica/nsobject.c
drivers/acpi/bus.c
drivers/acpi/cppc_acpi.c
drivers/acpi/nfit/core.c
drivers/acpi/nfit/intel.c
drivers/android/binder.c
drivers/android/binder_alloc.c
drivers/ata/libata-zpodd.c
drivers/block/null_blk_main.c
drivers/block/paride/pcd.c
drivers/block/paride/pf.c
drivers/block/virtio_blk.c
drivers/block/xsysace.c
drivers/block/zram/zram_drv.c
drivers/bluetooth/btusb.c
drivers/char/Kconfig
drivers/char/ipmi/ipmi_dmi.c
drivers/char/ipmi/ipmi_msghandler.c
drivers/char/ipmi/ipmi_si_hardcode.c
drivers/char/tpm/eventlog/tpm2.c
drivers/char/tpm/tpm-dev-common.c
drivers/char/tpm/tpm-interface.c
drivers/clk/at91/clk-programmable.c
drivers/clk/at91/pmc.h
drivers/clk/at91/sama5d2.c
drivers/clk/imx/clk-pll14xx.c
drivers/clk/mediatek/clk-gate.c
drivers/clk/meson/clk-pll.c
drivers/clk/meson/g12a.c
drivers/clk/meson/gxbb.c
drivers/clk/meson/vid-pll-div.c
drivers/clk/x86/clk-pmc-atom.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/scpi-cpufreq.c
drivers/crypto/caam/caamhash.c
drivers/dma/stm32-mdma.c
drivers/gpio/gpio-adnp.c
drivers/gpio/gpio-aspeed.c
drivers/gpio/gpio-exar.c
drivers/gpio/gpio-mockup.c
drivers/gpio/gpiolib-of.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdkfd/kfd_device.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_file.c
drivers/gpu/drm/i915/gvt/cmd_parser.c
drivers/gpu/drm/i915/gvt/display.c
drivers/gpu/drm/i915/gvt/dmabuf.c
drivers/gpu/drm/i915/gvt/gtt.c
drivers/gpu/drm/i915/gvt/gtt.h
drivers/gpu/drm/i915/gvt/kvmgt.c
drivers/gpu/drm/i915/gvt/mmio_context.c
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/icl_dsi.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/selftests/i915_gem_evict.c
drivers/gpu/drm/i915/vlv_dsi.c
drivers/gpu/drm/mediatek/mtk_dpi.c
drivers/gpu/drm/mediatek/mtk_drm_drv.c
drivers/gpu/drm/mediatek/mtk_drm_gem.c
drivers/gpu/drm/mediatek/mtk_drm_gem.h
drivers/gpu/drm/mediatek/mtk_hdmi.c
drivers/gpu/drm/mediatek/mtk_hdmi_phy.c
drivers/gpu/drm/mediatek/mtk_hdmi_phy.h
drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c
drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
drivers/gpu/drm/meson/meson_drv.c
drivers/gpu/drm/meson/meson_dw_hdmi.c
drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
drivers/gpu/drm/rockchip/rockchip_drm_vop.c
drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
drivers/gpu/drm/sun4i/sun8i_tcon_top.c
drivers/gpu/drm/tegra/hub.c
drivers/gpu/drm/tegra/vic.c
drivers/gpu/drm/udl/udl_connector.c
drivers/gpu/drm/udl/udl_drv.c
drivers/gpu/drm/udl/udl_drv.h
drivers/gpu/drm/udl/udl_main.c
drivers/gpu/drm/vgem/vgem_drv.c
drivers/gpu/drm/vkms/vkms_gem.c
drivers/gpu/host1x/hw/channel_hw.c
drivers/hid/Kconfig
drivers/hid/hid-core.c
drivers/hid/hid-debug.c
drivers/hid/hid-ids.h
drivers/hid/hid-input.c
drivers/hid/hid-logitech-hidpp.c
drivers/hid/hid-quirks.c
drivers/hid/hid-steam.c
drivers/hid/hid-uclogic-params.c
drivers/hid/i2c-hid/i2c-hid-core.c
drivers/hwmon/Kconfig
drivers/hwmon/ntc_thermistor.c
drivers/hwmon/occ/common.c
drivers/i2c/busses/Kconfig
drivers/i2c/busses/i2c-i801.c
drivers/i2c/busses/i2c-imx.c
drivers/infiniband/core/addr.c
drivers/infiniband/hw/hfi1/chip.c
drivers/infiniband/hw/hfi1/qp.c
drivers/infiniband/hw/hfi1/rc.c
drivers/infiniband/hw/hfi1/tid_rdma.c
drivers/infiniband/hw/hns/hns_roce_hem.c
drivers/infiniband/hw/hns/hns_roce_mr.c
drivers/infiniband/hw/hns/hns_roce_qp.c
drivers/infiniband/hw/mlx5/cmd.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/hw/mlx5/odp.c
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/hw/nes/nes_cm.c
drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
drivers/iommu/amd_iommu.c
drivers/iommu/amd_iommu_init.c
drivers/iommu/amd_iommu_types.h
drivers/iommu/io-pgtable-arm-v7s.c
drivers/iommu/iommu.c
drivers/irqchip/irq-ls1x.c
drivers/isdn/mISDN/socket.c
drivers/leds/leds-pca9532.c
drivers/leds/trigger/ledtrig-netdev.c
drivers/lightnvm/pblk-read.c
drivers/md/dm-core.h
drivers/md/dm-init.c
drivers/md/dm-integrity.c
drivers/md/dm-rq.c
drivers/md/dm-table.c
drivers/md/dm.c
drivers/mfd/Kconfig
drivers/mfd/sprd-sc27xx-spi.c
drivers/mfd/twl-core.c
drivers/misc/habanalabs/command_submission.c
drivers/misc/habanalabs/debugfs.c
drivers/misc/habanalabs/device.c
drivers/misc/habanalabs/goya/goya.c
drivers/misc/habanalabs/habanalabs.h
drivers/misc/habanalabs/hw_queue.c
drivers/misc/habanalabs/memory.c
drivers/misc/habanalabs/mmu.c
drivers/mmc/host/alcor.c
drivers/mmc/host/sdhci-omap.c
drivers/mtd/chips/cfi_cmdset_0002.c
drivers/net/appletalk/ipddp.c
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_sysfs_slave.c
drivers/net/dsa/microchip/ksz9477.c
drivers/net/dsa/mv88e6xxx/port.c
drivers/net/dummy.c
drivers/net/ethernet/amazon/ena/ena_netdev.c
drivers/net/ethernet/amd/xgbe/xgbe-dev.c
drivers/net/ethernet/aurora/nb8800.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/broadcom/tg3.h
drivers/net/ethernet/cadence/macb_main.c
drivers/net/ethernet/cavium/liquidio/lio_main.c
drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
drivers/net/ethernet/cavium/thunder/nicvf_main.c
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
drivers/net/ethernet/chelsio/cxgb3/l2t.c
drivers/net/ethernet/chelsio/cxgb3/l2t.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/hisilicon/hns/hnae.c
drivers/net/ethernet/hisilicon/hns/hnae.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
drivers/net/ethernet/hisilicon/hns/hns_enet.c
drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
drivers/net/ethernet/hisilicon/hns3/hnae3.c
drivers/net/ethernet/hisilicon/hns3/hnae3.h
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
drivers/net/ethernet/hisilicon/hns_mdio.c
drivers/net/ethernet/huawei/hinic/hinic_tx.c
drivers/net/ethernet/ibm/ehea/ehea_main.c
drivers/net/ethernet/ibm/ehea/ehea_qmr.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/e1000/e1000_main.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/fm10k/fm10k_main.c
drivers/net/ethernet/intel/i40e/Makefile
drivers/net/ethernet/intel/i40e/i40e.h
drivers/net/ethernet/intel/i40e/i40e_adminq.c
drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
drivers/net/ethernet/intel/i40e/i40e_common.c
drivers/net/ethernet/intel/i40e/i40e_dcb.c
drivers/net/ethernet/intel/i40e/i40e_dcb.h
drivers/net/ethernet/intel/i40e/i40e_ddp.c [new file with mode: 0644]
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_prototype.h
drivers/net/ethernet/intel/i40e/i40e_ptp.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_type.h
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/i40e/i40e_xsk.c
drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h
drivers/net/ethernet/intel/iavf/iavf_txrx.c
drivers/net/ethernet/intel/ice/ice_txrx.c
drivers/net/ethernet/intel/igb/e1000_defines.h
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/igc/igc_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/mediatek/mtk_eth_soc.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en/port.c
drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
drivers/net/ethernet/mellanox/mlx5/core/en_common.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
drivers/net/ethernet/mellanox/mlx5/core/eq.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/events.c
drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/health.c
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
drivers/net/ethernet/mellanox/mlx5/core/uar.c
drivers/net/ethernet/mellanox/mlxsw/core.c
drivers/net/ethernet/mellanox/mlxsw/core.h
drivers/net/ethernet/mellanox/mlxsw/minimal.c
drivers/net/ethernet/mellanox/mlxsw/reg.h
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
drivers/net/ethernet/mellanox/mlxsw/switchib.c
drivers/net/ethernet/mellanox/mlxsw/switchx2.c
drivers/net/ethernet/mscc/ocelot.c
drivers/net/ethernet/neterion/vxge/vxge-config.c
drivers/net/ethernet/netronome/nfp/Makefile
drivers/net/ethernet/netronome/nfp/abm/ctrl.c
drivers/net/ethernet/netronome/nfp/abm/main.c
drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
drivers/net/ethernet/netronome/nfp/bpf/fw.h
drivers/net/ethernet/netronome/nfp/bpf/main.c
drivers/net/ethernet/netronome/nfp/bpf/main.h
drivers/net/ethernet/netronome/nfp/bpf/offload.c
drivers/net/ethernet/netronome/nfp/ccm.c [new file with mode: 0644]
drivers/net/ethernet/netronome/nfp/ccm.h [new file with mode: 0644]
drivers/net/ethernet/netronome/nfp/flower/action.c
drivers/net/ethernet/netronome/nfp/flower/cmsg.c
drivers/net/ethernet/netronome/nfp/flower/cmsg.h
drivers/net/ethernet/netronome/nfp/flower/main.c
drivers/net/ethernet/netronome/nfp/flower/main.h
drivers/net/ethernet/netronome/nfp/flower/match.c
drivers/net/ethernet/netronome/nfp/flower/metadata.c
drivers/net/ethernet/netronome/nfp/flower/offload.c
drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
drivers/net/ethernet/netronome/nfp/nfp_app.h
drivers/net/ethernet/netronome/nfp/nfp_devlink.c
drivers/net/ethernet/netronome/nfp/nfp_net.h
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
drivers/net/ethernet/netronome/nfp/nfp_port.c
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
drivers/net/ethernet/pasemi/pasemi_mac.c
drivers/net/ethernet/qlogic/qed/qed.h
drivers/net/ethernet/qlogic/qed/qed_dev.c
drivers/net/ethernet/qlogic/qed/qed_int.c
drivers/net/ethernet/qlogic/qed/qed_int.h
drivers/net/ethernet/qlogic/qed/qed_main.c
drivers/net/ethernet/qlogic/qed/qed_sriov.c
drivers/net/ethernet/qlogic/qede/qede_fp.c
drivers/net/ethernet/qlogic/qede/qede_ptp.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
drivers/net/ethernet/rdc/r6040.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/rocker/rocker_main.c
drivers/net/ethernet/sfc/falcon/tx.c
drivers/net/ethernet/sfc/tx.c
drivers/net/ethernet/stmicro/stmmac/descs_com.h
drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
drivers/net/ethernet/stmicro/stmmac/enh_desc.c
drivers/net/ethernet/stmicro/stmmac/hwif.h
drivers/net/ethernet/stmicro/stmmac/norm_desc.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
drivers/net/ethernet/ti/davinci_mdio.c
drivers/net/ethernet/xilinx/xilinx_emaclite.c
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/loopback.c
drivers/net/macsec.c
drivers/net/netdevsim/Makefile
drivers/net/netdevsim/bpf.c
drivers/net/netdevsim/netdev.c
drivers/net/netdevsim/netdevsim.h
drivers/net/netdevsim/sdev.c [new file with mode: 0644]
drivers/net/phy/Kconfig
drivers/net/phy/Makefile
drivers/net/phy/amd.c
drivers/net/phy/aquantia_main.c
drivers/net/phy/asix.c
drivers/net/phy/at803x.c
drivers/net/phy/bcm-cygnus.c
drivers/net/phy/bcm63xx.c
drivers/net/phy/bcm7xxx.c
drivers/net/phy/broadcom.c
drivers/net/phy/cicada.c
drivers/net/phy/davicom.c
drivers/net/phy/dp83640.c
drivers/net/phy/dp83822.c
drivers/net/phy/dp83848.c
drivers/net/phy/dp83867.c
drivers/net/phy/dp83tc811.c
drivers/net/phy/et1011c.c
drivers/net/phy/icplus.c
drivers/net/phy/intel-xway.c
drivers/net/phy/lxt.c
drivers/net/phy/marvell.c
drivers/net/phy/marvell10g.c
drivers/net/phy/mdio-mux-meson-g12a.c [new file with mode: 0644]
drivers/net/phy/meson-gxl.c
drivers/net/phy/micrel.c
drivers/net/phy/microchip.c
drivers/net/phy/mscc.c
drivers/net/phy/national.c
drivers/net/phy/phy-c45.c
drivers/net/phy/phy-core.c
drivers/net/phy/phy_device.c
drivers/net/phy/qsemi.c
drivers/net/phy/realtek.c
drivers/net/phy/rockchip.c
drivers/net/phy/smsc.c
drivers/net/phy/ste10Xp.c
drivers/net/phy/uPD60620.c
drivers/net/phy/vitesse.c
drivers/net/team/team.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/r8152.c
drivers/net/veth.c
drivers/net/virtio_net.c
drivers/net/vrf.c
drivers/net/wireless/ath/ath10k/htt_rx.c
drivers/net/wireless/ath/ath10k/mac.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/intel/iwlwifi/cfg/22000.c
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
drivers/net/wireless/intel/iwlwifi/fw/init.c
drivers/net/wireless/intel/iwlwifi/iwl-config.h
drivers/net/wireless/intel/iwlwifi/iwl-csr.h
drivers/net/wireless/intel/iwlwifi/iwl-trans.h
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.h
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
drivers/net/wireless/intel/iwlwifi/pcie/internal.h
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
drivers/net/wireless/intel/iwlwifi/pcie/tx.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/mediatek/mt76/mt7603/init.c
drivers/net/wireless/mediatek/mt76/mt7603/mac.c
drivers/net/wireless/mediatek/mt76/mt7603/main.c
drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
drivers/net/wireless/ralink/rt2x00/rt2x00.h
drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
drivers/net/xen-netback/common.h
drivers/net/xen-netback/xenbus.c
drivers/net/xen-netfront.c
drivers/nvdimm/btt_devs.c
drivers/nvdimm/namespace_devs.c
drivers/nvdimm/pmem.c
drivers/nvdimm/security.c
drivers/nvme/host/core.c
drivers/nvme/host/fc.c
drivers/nvme/host/multipath.c
drivers/nvme/host/tcp.c
drivers/nvme/target/admin-cmd.c
drivers/nvme/target/core.c
drivers/nvme/target/discovery.c
drivers/nvme/target/io-cmd-file.c
drivers/nvme/target/nvmet.h
drivers/parisc/iosapic.c
drivers/pci/hotplug/pciehp_ctrl.c
drivers/pci/pci.h
drivers/pci/pcie/bw_notification.c
drivers/pci/probe.c
drivers/pci/quirks.c
drivers/phy/allwinner/phy-sun4i-usb.c
drivers/platform/x86/pmc_atom.c
drivers/reset/reset-meson-audio-arb.c
drivers/rtc/Kconfig
drivers/rtc/rtc-cros-ec.c
drivers/rtc/rtc-da9063.c
drivers/rtc/rtc-sh.c
drivers/s390/cio/chsc.c
drivers/s390/cio/vfio_ccw_drv.c
drivers/s390/crypto/ap_bus.c
drivers/s390/crypto/ap_bus.h
drivers/s390/crypto/ap_queue.c
drivers/s390/crypto/zcrypt_api.c
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_core_sys.c
drivers/s390/net/qeth_ethtool.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/s390/scsi/zfcp_erp.c
drivers/s390/scsi/zfcp_ext.h
drivers/s390/scsi/zfcp_fc.c
drivers/s390/scsi/zfcp_scsi.c
drivers/scsi/aacraid/aacraid.h
drivers/scsi/aacraid/commsup.c
drivers/scsi/csiostor/csio_scsi.c
drivers/scsi/ibmvscsi/ibmvfc.c
drivers/scsi/ibmvscsi/ibmvfc.h
drivers/scsi/lpfc/lpfc_scsi.c
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/scsi/mpt3sas/mpt3sas_scsih.c
drivers/scsi/qedi/qedi_main.c
drivers/scsi/qla4xxx/ql4_os.c
drivers/scsi/scsi_devinfo.c
drivers/scsi/scsi_dh.c
drivers/scsi/scsi_sysfs.c
drivers/scsi/sd.c
drivers/scsi/storvsc_drv.c
drivers/scsi/virtio_scsi.c
drivers/soc/bcm/bcm2835-power.c
drivers/staging/Kconfig
drivers/staging/Makefile
drivers/staging/axis-fifo/Kconfig
drivers/staging/comedi/comedidev.h
drivers/staging/comedi/drivers.c
drivers/staging/comedi/drivers/ni_mio_common.c
drivers/staging/erofs/dir.c
drivers/staging/erofs/unzip_vle.c
drivers/staging/erofs/unzip_vle_lz4.c
drivers/staging/mt7621-dts/gbpc1.dts
drivers/staging/mt7621-dts/mt7621.dtsi
drivers/staging/mt7621-eth/Documentation/devicetree/bindings/net/mediatek-net-gsw.txt [deleted file]
drivers/staging/mt7621-eth/Kconfig [deleted file]
drivers/staging/mt7621-eth/Makefile [deleted file]
drivers/staging/mt7621-eth/TODO [deleted file]
drivers/staging/mt7621-eth/ethtool.c [deleted file]
drivers/staging/mt7621-eth/ethtool.h [deleted file]
drivers/staging/mt7621-eth/gsw_mt7620.h [deleted file]
drivers/staging/mt7621-eth/gsw_mt7621.c [deleted file]
drivers/staging/mt7621-eth/mdio.c [deleted file]
drivers/staging/mt7621-eth/mdio.h [deleted file]
drivers/staging/mt7621-eth/mdio_mt7620.c [deleted file]
drivers/staging/mt7621-eth/mtk_eth_soc.c [deleted file]
drivers/staging/mt7621-eth/mtk_eth_soc.h [deleted file]
drivers/staging/mt7621-eth/soc_mt7621.c [deleted file]
drivers/staging/mt7621-pci/Kconfig
drivers/staging/octeon/ethernet-mdio.c
drivers/staging/octeon/ethernet.c
drivers/staging/octeon/octeon-ethernet.h
drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
drivers/staging/rtl8188eu/core/rtw_xmit.c
drivers/staging/rtl8188eu/include/rtw_xmit.h
drivers/staging/rtl8712/rtl8712_cmd.c
drivers/staging/rtl8712/rtl8712_cmd.h
drivers/staging/rtl8723bs/core/rtw_xmit.c
drivers/staging/rtl8723bs/include/rtw_xmit.h
drivers/staging/rtlwifi/phydm/rtl_phydm.c
drivers/staging/rtlwifi/rtl8822be/fw.c
drivers/staging/speakup/speakup_soft.c
drivers/staging/speakup/spk_priv.h
drivers/staging/speakup/synth.c
drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
drivers/staging/vt6655/device_main.c
drivers/tty/serial/ar933x_uart.c
drivers/tty/serial/atmel_serial.c
drivers/tty/serial/kgdboc.c
drivers/tty/serial/max310x.c
drivers/tty/serial/mvebu-uart.c
drivers/tty/serial/mxs-auart.c
drivers/tty/serial/qcom_geni_serial.c
drivers/tty/serial/sc16is7xx.c
drivers/tty/serial/sh-sci.c
drivers/tty/tty_port.c
drivers/usb/class/cdc-acm.c
drivers/usb/common/common.c
drivers/usb/core/hcd.c
drivers/usb/dwc3/dwc3-pci.c
drivers/usb/gadget/function/f_hid.c
drivers/usb/gadget/udc/net2272.c
drivers/usb/gadget/udc/net2280.c
drivers/usb/host/u132-hcd.c
drivers/usb/host/xhci-dbgcap.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-rcar.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.h
drivers/usb/misc/usb251xb.c
drivers/usb/mtu3/Kconfig
drivers/usb/serial/cp210x.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/mos7720.c
drivers/usb/serial/option.c
drivers/usb/typec/tcpm/tcpm.c
drivers/usb/typec/tcpm/wcove.c
drivers/vfio/pci/vfio_pci.c
drivers/vfio/vfio_iommu_spapr_tce.c
drivers/vfio/vfio_iommu_type1.c
drivers/vhost/vhost.c
drivers/virt/vboxguest/vboxguest_core.c
drivers/virt/vboxguest/vboxguest_core.h
drivers/virt/vboxguest/vboxguest_linux.c
drivers/virt/vboxguest/vboxguest_utils.c
drivers/virt/vboxguest/vboxguest_version.h
drivers/virt/vboxguest/vmmdev.h
drivers/virtio/virtio_pci_common.c
drivers/virtio/virtio_ring.c
drivers/xen/privcmd-buf.c
drivers/xen/xenbus/xenbus_dev_frontend.c
fs/afs/fsclient.c
fs/afs/rxrpc.c
fs/afs/yfsclient.c
fs/aio.c
fs/block_dev.c
fs/btrfs/ioctl.c
fs/btrfs/props.c
fs/ceph/inode.c
fs/cifs/cifsfs.c
fs/cifs/cifsglob.h
fs/cifs/connect.c
fs/cifs/smb2file.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/cifs/smb2proto.h
fs/dax.c
fs/debugfs/inode.c
fs/fs_parser.c
fs/fuse/dev.c
fs/hugetlbfs/inode.c
fs/io_uring.c
fs/jffs2/readinode.c
fs/jffs2/super.c
fs/nfs/nfs42proc.c
fs/nfs/nfs4file.c
fs/nfs/nfs4xdr.c
fs/nfs/super.c
fs/ocfs2/refcounttree.c
fs/open.c
fs/pipe.c
fs/proc/base.c
fs/proc/kcore.c
fs/proc/proc_sysctl.c
fs/read_write.c
fs/splice.c
fs/ubifs/super.c
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/scrub/btree.c
fs/xfs/scrub/dabtree.c
fs/xfs/xfs_discard.c
fs/xfs/xfs_file.c
include/acpi/acoutput.h
include/acpi/platform/aclinux.h
include/asm-generic/syscall.h
include/drm/drm_modeset_helper_vtables.h
include/dt-bindings/clock/sifive-fu540-prci.h [new file with mode: 0644]
include/dt-bindings/reset/amlogic,meson-g12a-reset.h
include/keys/trusted.h
include/linux/bio.h
include/linux/bitrev.h
include/linux/blk-mq.h
include/linux/bpf.h
include/linux/bpf_verifier.h
include/linux/btf.h
include/linux/bvec.h
include/linux/device.h
include/linux/fs.h
include/linux/hugetlb.h
include/linux/if_bridge.h
include/linux/jiffies.h
include/linux/kcore.h
include/linux/kernel.h
include/linux/kvm_host.h
include/linux/list.h
include/linux/memcontrol.h
include/linux/mii.h
include/linux/mlx5/cq.h
include/linux/mlx5/device.h
include/linux/mlx5/doorbell.h
include/linux/mlx5/driver.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mm.h
include/linux/mm_types.h
include/linux/netdevice.h
include/linux/netfilter.h
include/linux/netfilter/nfnetlink_osf.h
include/linux/netfilter/x_tables.h
include/linux/netfilter_ipv6.h
include/linux/nvme.h
include/linux/page-isolation.h
include/linux/phy.h
include/linux/pipe_fs_i.h
include/linux/platform_data/gpio/gpio-amd-fch.h
include/linux/platform_data/x86/clk-pmc-atom.h
include/linux/ptrace.h
include/linux/rhashtable-types.h
include/linux/rhashtable.h
include/linux/sched/signal.h
include/linux/skbuff.h
include/linux/slab.h
include/linux/string.h
include/linux/sunrpc/sched.h
include/linux/vbox_utils.h
include/linux/virtio_ring.h
include/net/af_rxrpc.h
include/net/cfg80211.h
include/net/devlink.h
include/net/dsa.h
include/net/ip.h
include/net/ip6_fib.h
include/net/ip6_route.h
include/net/ip_fib.h
include/net/ip_vs.h
include/net/ipv6_stubs.h
include/net/mac80211.h
include/net/ndisc.h
include/net/neighbour.h
include/net/net_namespace.h
include/net/netfilter/ipv4/nf_nat_masquerade.h [deleted file]
include/net/netfilter/ipv6/nf_nat_masquerade.h [deleted file]
include/net/netfilter/nf_conntrack_expect.h
include/net/netfilter/nf_nat.h
include/net/netfilter/nf_nat_masquerade.h [new file with mode: 0644]
include/net/netfilter/nf_queue.h
include/net/netfilter/nf_tables.h
include/net/netns/hash.h
include/net/netrom.h
include/net/nfc/nci_core.h
include/net/route.h
include/net/sch_generic.h
include/net/sctp/sctp.h
include/net/sctp/ulpqueue.h
include/net/sock.h
include/net/tls.h
include/net/udp.h
include/sound/soc.h
include/trace/events/fib.h
include/trace/events/fib6.h
include/trace/events/mlxsw.h
include/trace/events/syscalls.h
include/uapi/linux/Kbuild
include/uapi/linux/bpf.h
include/uapi/linux/btf.h
include/uapi/linux/ethtool.h
include/uapi/linux/ip_vs.h
include/uapi/linux/netfilter/nf_tables.h
include/uapi/linux/vbox_vmmdev_types.h
include/uapi/sound/asound.h
ipc/util.c
kernel/bpf/arraymap.c
kernel/bpf/btf.c
kernel/bpf/core.c
kernel/bpf/cpumap.c
kernel/bpf/disasm.c
kernel/bpf/hashtab.c
kernel/bpf/inode.c
kernel/bpf/local_storage.c
kernel/bpf/lpm_trie.c
kernel/bpf/queue_stack_maps.c
kernel/bpf/syscall.c
kernel/bpf/verifier.c
kernel/cpu.c
kernel/dma/debug.c
kernel/events/core.c
kernel/events/ring_buffer.c
kernel/irq/chip.c
kernel/irq/irqdesc.c
kernel/locking/lockdep.c
kernel/ptrace.c
kernel/sched/fair.c
kernel/seccomp.c
kernel/signal.c
kernel/sysctl.c
kernel/time/alarmtimer.c
kernel/time/time.c
kernel/trace/trace.c
kernel/trace/trace_syscalls.c
kernel/watchdog.c
lib/Kconfig.debug
lib/iov_iter.c
lib/lzo/lzo1x_compress.c
lib/lzo/lzo1x_decompress_safe.c
lib/rhashtable.c
lib/sbitmap.c
lib/string.c
lib/syscall.c
lib/test_rhashtable.c
mm/compaction.c
mm/debug.c
mm/gup.c
mm/huge_memory.c
mm/hugetlb.c
mm/kasan/kasan.h
mm/kmemleak.c
mm/memcontrol.c
mm/memory.c
mm/memory_hotplug.c
mm/mempolicy.c
mm/migrate.c
mm/page_alloc.c
mm/page_isolation.c
mm/slab.c
mm/slab.h
mm/slab_common.c
mm/slub.c
mm/sparse.c
mm/util.c
net/8021q/vlan_dev.c
net/atm/clip.c
net/atm/lec.c
net/batman-adv/bat_v_elp.c
net/batman-adv/bridge_loop_avoidance.c
net/batman-adv/sysfs.c
net/batman-adv/translation-table.c
net/bluetooth/sco.c
net/bpf/Makefile
net/bpf/test_run.c
net/bridge/br_arp_nd_proxy.c
net/bridge/br_fdb.c
net/bridge/br_forward.c
net/bridge/br_input.c
net/bridge/br_multicast.c
net/bridge/br_netlink.c
net/bridge/br_private.h
net/bridge/br_stp_if.c
net/bridge/br_vlan.c
net/bridge/br_vlan_tunnel.c
net/bridge/netfilter/ebtable_broute.c
net/bridge/netfilter/ebtables.c
net/caif/caif_dev.c
net/ceph/messenger.c
net/core/datagram.c
net/core/dev.c
net/core/devlink.c
net/core/ethtool.c
net/core/failover.c
net/core/filter.c
net/core/flow_dissector.c
net/core/gen_stats.c
net/core/neighbour.c
net/core/net-sysfs.c
net/core/net_namespace.c
net/core/ptp_classifier.c
net/core/rtnetlink.c
net/core/skbuff.c
net/core/sock.c
net/dccp/feat.c
net/decnet/af_decnet.c
net/dns_resolver/dns_query.c
net/dsa/dsa2.c
net/dsa/slave.c
net/dsa/tag_qca.c
net/hsr/Makefile
net/hsr/hsr_debugfs.c [new file with mode: 0644]
net/hsr/hsr_device.c
net/hsr/hsr_device.h
net/hsr/hsr_forward.c
net/hsr/hsr_forward.h
net/hsr/hsr_framereg.c
net/hsr/hsr_framereg.h
net/hsr/hsr_main.c
net/hsr/hsr_main.h
net/hsr/hsr_netlink.c
net/hsr/hsr_netlink.h
net/hsr/hsr_slave.c
net/hsr/hsr_slave.h
net/ipv4/af_inet.c
net/ipv4/fib_frontend.c
net/ipv4/fib_lookup.h
net/ipv4/fib_semantics.c
net/ipv4/fib_trie.c
net/ipv4/fou.c
net/ipv4/inet_connection_sock.c
net/ipv4/ip_forward.c
net/ipv4/ip_gre.c
net/ipv4/ip_input.c
net/ipv4/ip_options.c
net/ipv4/ip_output.c
net/ipv4/ipmr.c
net/ipv4/netfilter/Kconfig
net/ipv4/netfilter/Makefile
net/ipv4/netfilter/nft_chain_route_ipv4.c [deleted file]
net/ipv4/route.c
net/ipv4/tcp_dctcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_output.c
net/ipv4/udp.c
net/ipv4/xfrm4_policy.c
net/ipv6/addrconf.c
net/ipv6/addrconf_core.c
net/ipv6/af_inet6.c
net/ipv6/fib6_rules.c
net/ipv6/ila/ila_xlat.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6mr.c
net/ipv6/ndisc.c
net/ipv6/netfilter/Kconfig
net/ipv6/netfilter/Makefile
net/ipv6/netfilter/ip6t_MASQUERADE.c [deleted file]
net/ipv6/netfilter/nft_chain_route_ipv6.c [deleted file]
net/ipv6/route.c
net/ipv6/sit.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/kcm/kcmsock.c
net/llc/af_llc.c
net/mac80211/driver-ops.h
net/mac80211/key.c
net/mac80211/mesh_pathtbl.c
net/mac80211/rx.c
net/mac80211/trace_msg.h
net/mac80211/tx.c
net/mpls/mpls_iptunnel.c
net/netfilter/Kconfig
net/netfilter/Makefile
net/netfilter/core.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/ipvs/ip_vs_xmit.c
net/netfilter/nf_conntrack_expect.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_conntrack_sip.c
net/netfilter/nf_flow_table_ip.c
net/netfilter/nf_internals.h
net/netfilter/nf_nat_core.c
net/netfilter/nf_nat_masquerade.c
net/netfilter/nf_nat_proto.c
net/netfilter/nf_queue.c
net/netfilter/nf_tables_api.c
net/netfilter/nfnetlink_osf.c
net/netfilter/nft_chain_nat.c
net/netfilter/nft_chain_route.c [new file with mode: 0644]
net/netfilter/nft_masq.c
net/netfilter/nft_nat.c
net/netfilter/nft_osf.c
net/netfilter/nft_redir.c
net/netfilter/x_tables.c
net/netfilter/xt_MASQUERADE.c [moved from net/ipv4/netfilter/ipt_MASQUERADE.c with 52% similarity]
net/netlink/af_netlink.c
net/netrom/af_netrom.c
net/netrom/nr_loopback.c
net/netrom/nr_route.c
net/netrom/sysctl_net_netrom.c
net/nfc/nci/hci.c
net/openvswitch/conntrack.c
net/openvswitch/flow_netlink.c
net/rds/af_rds.c
net/rds/bind.c
net/rds/tcp.c
net/rxrpc/af_rxrpc.c
net/rxrpc/ar-internal.h
net/rxrpc/conn_event.c
net/rxrpc/input.c
net/rxrpc/local_object.c
net/rxrpc/peer_event.c
net/rxrpc/sendmsg.c
net/sched/act_sample.c
net/sched/cls_api.c
net/sched/cls_flower.c
net/sched/cls_matchall.c
net/sched/sch_api.c
net/sched/sch_cake.c
net/sched/sch_cbq.c
net/sched/sch_cbs.c
net/sched/sch_drr.c
net/sched/sch_generic.c
net/sched/sch_hfsc.c
net/sched/sch_htb.c
net/sched/sch_mq.c
net/sched/sch_mqprio.c
net/sched/sch_multiq.c
net/sched/sch_prio.c
net/sched/sch_qfq.c
net/sched/sch_red.c
net/sched/sch_sfb.c
net/sched/sch_taprio.c
net/sched/sch_tbf.c
net/sctp/protocol.c
net/sctp/sm_statefuns.c
net/sctp/socket.c
net/sctp/stream_interleave.c
net/sctp/ulpevent.c
net/sctp/ulpqueue.c
net/smc/af_smc.c
net/smc/smc.h
net/smc/smc_clc.c
net/smc/smc_clc.h
net/smc/smc_close.c
net/smc/smc_close.h
net/smc/smc_core.c
net/smc/smc_core.h
net/smc/smc_ism.c
net/smc/smc_pnet.c
net/smc/smc_pnet.h
net/strparser/strparser.c
net/sunrpc/clnt.c
net/sunrpc/xprtrdma/verbs.c
net/tipc/bcast.c
net/tipc/link.c
net/tipc/msg.h
net/tipc/name_table.c
net/tipc/netlink_compat.c
net/tipc/node.c
net/tipc/node.h
net/tipc/sysctl.c
net/tls/tls_device.c
net/tls/tls_main.c
net/tls/tls_sw.c
net/unix/af_unix.c
net/wireless/nl80211.c
net/wireless/reg.c
net/wireless/scan.c
net/wireless/util.c
samples/bpf/asm_goto_workaround.h
samples/bpf/offwaketime_user.c
samples/bpf/sampleip_user.c
samples/bpf/spintest_user.c
samples/bpf/trace_event_user.c
scripts/Makefile.build
scripts/checkpatch.pl
scripts/coccinelle/api/stream_open.cocci [new file with mode: 0644]
scripts/coccinelle/free/put_device.cocci
scripts/coccinelle/misc/badty.cocci
scripts/kconfig/lxdialog/inputbox.c
scripts/kconfig/nconf.c
scripts/kconfig/nconf.gui.c
scripts/link-vmlinux.sh
scripts/mod/modpost.c
security/Kconfig
security/apparmor/lsm.c
security/keys/trusted.c
security/yama/yama_lsm.c
sound/core/oss/pcm_oss.c
sound/core/pcm_native.c
sound/core/rawmidi.c
sound/core/seq/oss/seq_oss_synth.c
sound/core/seq/seq_clientmgr.c
sound/hda/ext/hdac_ext_bus.c
sound/hda/hdac_bus.c
sound/hda/hdac_component.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_ca0132.c
sound/pci/hda/patch_realtek.c
sound/soc/codecs/Kconfig
sound/soc/codecs/ab8500-codec.c
sound/soc/codecs/cs35l35.c
sound/soc/codecs/cs4270.c
sound/soc/codecs/hdac_hda.c
sound/soc/codecs/hdac_hda.h
sound/soc/codecs/hdmi-codec.c
sound/soc/codecs/nau8810.c
sound/soc/codecs/nau8824.c
sound/soc/codecs/rt5682.c
sound/soc/codecs/tlv320aic32x4-i2c.c
sound/soc/codecs/tlv320aic32x4-spi.c
sound/soc/codecs/tlv320aic32x4.c
sound/soc/codecs/tlv320aic3x.c
sound/soc/codecs/wm_adsp.c
sound/soc/codecs/wm_adsp.h
sound/soc/fsl/fsl_asrc.c
sound/soc/fsl/fsl_esai.c
sound/soc/generic/audio-graph-card.c
sound/soc/generic/simple-card.c
sound/soc/intel/atom/sst-mfld-platform-pcm.c
sound/soc/intel/boards/cht_bsw_max98090_ti.c
sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
sound/soc/intel/skylake/skl-messages.c
sound/soc/intel/skylake/skl-pcm.c
sound/soc/mediatek/common/mtk-btcvsd.c
sound/soc/mediatek/mt8183/mt8183-afe-clk.c
sound/soc/rockchip/rockchip_pdm.c
sound/soc/samsung/i2s.c
sound/soc/samsung/odroid.c
sound/soc/sh/rcar/core.c
sound/soc/sh/rcar/rsnd.h
sound/soc/sh/rcar/src.c
sound/soc/soc-core.c
sound/soc/soc-dapm.c
sound/soc/soc-pcm.c
sound/soc/soc-topology.c
sound/soc/stm/stm32_adfsdm.c
sound/soc/stm/stm32_i2s.c
sound/soc/stm/stm32_sai.c
sound/soc/stm/stm32_sai_sub.c
sound/xen/xen_snd_front_alsa.c
tools/arch/alpha/include/uapi/asm/mman.h
tools/arch/arm64/include/asm/barrier.h
tools/arch/mips/include/uapi/asm/mman.h
tools/arch/parisc/include/uapi/asm/mman.h
tools/arch/powerpc/include/uapi/asm/kvm.h
tools/arch/x86/include/asm/barrier.h
tools/arch/x86/include/asm/cpufeatures.h
tools/arch/xtensa/include/uapi/asm/mman.h
tools/bpf/bpftool/btf_dumper.c
tools/bpf/bpftool/map.c
tools/bpf/bpftool/prog.c
tools/bpf/bpftool/xlated_dumper.c
tools/build/feature/test-libopencsd.c
tools/include/linux/filter.h
tools/include/uapi/asm-generic/mman-common-tools.h [new file with mode: 0644]
tools/include/uapi/asm-generic/mman-common.h
tools/include/uapi/asm-generic/mman.h
tools/include/uapi/asm-generic/unistd.h
tools/include/uapi/drm/i915_drm.h
tools/include/uapi/linux/bpf.h
tools/include/uapi/linux/btf.h
tools/include/uapi/linux/fcntl.h
tools/include/uapi/linux/mman.h
tools/io_uring/io_uring-bench.c
tools/lib/bpf/.gitignore
tools/lib/bpf/Makefile
tools/lib/bpf/bpf.c
tools/lib/bpf/bpf.h
tools/lib/bpf/btf.c
tools/lib/bpf/btf.h
tools/lib/bpf/libbpf.c
tools/lib/bpf/libbpf.h
tools/lib/bpf/libbpf.map
tools/lib/bpf/libbpf.pc.template [new file with mode: 0644]
tools/lib/bpf/xsk.c
tools/objtool/Makefile
tools/objtool/check.c
tools/perf/Makefile.perf
tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
tools/perf/check-headers.sh
tools/perf/scripts/python/exported-sql-viewer.py
tools/perf/trace/beauty/mmap_flags.sh
tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
tools/perf/util/evlist.c
tools/perf/util/evlist.h
tools/perf/util/evsel.c
tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
tools/perf/util/machine.c
tools/perf/util/pmu.c
tools/power/x86/turbostat/turbostat.c
tools/testing/nvdimm/test/nfit.c
tools/testing/selftests/bpf/Makefile
tools/testing/selftests/bpf/bpf_helpers.h
tools/testing/selftests/bpf/config
tools/testing/selftests/bpf/flow_dissector_load.c
tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c
tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c [new file with mode: 0644]
tools/testing/selftests/bpf/prog_tests/flow_dissector.c
tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c
tools/testing/selftests/bpf/prog_tests/global_data.c [new file with mode: 0644]
tools/testing/selftests/bpf/prog_tests/skb_ctx.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/bpf_flow.c
tools/testing/selftests/bpf/progs/test_global_data.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/test_jhash.h [new file with mode: 0644]
tools/testing/selftests/bpf/progs/test_skb_ctx.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/test_tc_tunnel.c
tools/testing/selftests/bpf/progs/test_verif_scale1.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/test_verif_scale2.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/test_verif_scale3.c [new file with mode: 0644]
tools/testing/selftests/bpf/test_btf.c
tools/testing/selftests/bpf/test_offload.py
tools/testing/selftests/bpf/test_progs.c
tools/testing/selftests/bpf/test_progs.h
tools/testing/selftests/bpf/test_tc_tunnel.sh
tools/testing/selftests/bpf/test_verifier.c
tools/testing/selftests/bpf/trace_helpers.c
tools/testing/selftests/bpf/verifier/array_access.c
tools/testing/selftests/bpf/verifier/calls.c
tools/testing/selftests/bpf/verifier/ctx_skb.c
tools/testing/selftests/bpf/verifier/direct_value_access.c [new file with mode: 0644]
tools/testing/selftests/bpf/verifier/ld_dw.c
tools/testing/selftests/bpf/verifier/var_off.c
tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh
tools/testing/selftests/kvm/Makefile
tools/testing/selftests/kvm/include/kvm_util.h
tools/testing/selftests/kvm/include/x86_64/processor.h
tools/testing/selftests/kvm/lib/kvm_util.c
tools/testing/selftests/kvm/lib/x86_64/processor.c
tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c
tools/testing/selftests/kvm/x86_64/evmcs_test.c
tools/testing/selftests/kvm/x86_64/smm_test.c [new file with mode: 0644]
tools/testing/selftests/kvm/x86_64/state_test.c
tools/testing/selftests/net/fib_tests.sh
tools/testing/selftests/net/forwarding/bridge_igmp.sh [new file with mode: 0755]
tools/testing/selftests/net/pmtu.sh
tools/testing/selftests/netfilter/Makefile
tools/testing/selftests/netfilter/bridge_brouter.sh [new file with mode: 0755]
tools/testing/selftests/netfilter/nft_nat.sh
tools/testing/selftests/tc-testing/tc-tests/actions/sample.json
tools/testing/selftests/tc-testing/tc-tests/filters/tests.json
tools/testing/selftests/tpm2/tpm2.py
tools/testing/selftests/tpm2/tpm2_tests.py
virt/kvm/arm/hyp/vgic-v3-sr.c
virt/kvm/arm/mmu.c
virt/kvm/arm/vgic/vgic-its.c
virt/kvm/arm/vgic/vgic-v3.c
virt/kvm/arm/vgic/vgic.c
virt/kvm/eventfd.c
virt/kvm/irqchip.c
virt/kvm/kvm_main.c

diff --git a/.clang-format b/.clang-format
index 3a4c8220df2f721b960f6c2b313b39c032b1c030..2ffd69afc1a8280654a8763e044e27e08be7a69f 100644 (file)
@@ -78,6 +78,8 @@ ForEachMacros:
   - 'ata_qc_for_each_with_internal'
   - 'ax25_for_each'
   - 'ax25_uid_for_each'
+  - '__bio_for_each_bvec'
+  - 'bio_for_each_bvec'
   - 'bio_for_each_integrity_vec'
   - '__bio_for_each_segment'
   - 'bio_for_each_segment'
@@ -118,10 +120,12 @@ ForEachMacros:
   - 'drm_for_each_legacy_plane'
   - 'drm_for_each_plane'
   - 'drm_for_each_plane_mask'
+  - 'drm_for_each_privobj'
   - 'drm_mm_for_each_hole'
   - 'drm_mm_for_each_node'
   - 'drm_mm_for_each_node_in_range'
   - 'drm_mm_for_each_node_safe'
+  - 'flow_action_for_each'
   - 'for_each_active_drhd_unit'
   - 'for_each_active_iommu'
   - 'for_each_available_child_of_node'
@@ -158,6 +162,9 @@ ForEachMacros:
   - 'for_each_dss_dev'
   - 'for_each_efi_memory_desc'
   - 'for_each_efi_memory_desc_in_map'
+  - 'for_each_element'
+  - 'for_each_element_extid'
+  - 'for_each_element_id'
   - 'for_each_endpoint_of_node'
   - 'for_each_evictable_lru'
   - 'for_each_fib6_node_rt_rcu'
@@ -195,6 +202,7 @@ ForEachMacros:
   - 'for_each_net_rcu'
   - 'for_each_new_connector_in_state'
   - 'for_each_new_crtc_in_state'
+  - 'for_each_new_mst_mgr_in_state'
   - 'for_each_new_plane_in_state'
   - 'for_each_new_private_obj_in_state'
   - 'for_each_node'
@@ -210,8 +218,10 @@ ForEachMacros:
   - 'for_each_of_pci_range'
   - 'for_each_old_connector_in_state'
   - 'for_each_old_crtc_in_state'
+  - 'for_each_old_mst_mgr_in_state'
   - 'for_each_oldnew_connector_in_state'
   - 'for_each_oldnew_crtc_in_state'
+  - 'for_each_oldnew_mst_mgr_in_state'
   - 'for_each_oldnew_plane_in_state'
   - 'for_each_oldnew_plane_in_state_reverse'
   - 'for_each_oldnew_private_obj_in_state'
@@ -243,6 +253,9 @@ ForEachMacros:
   - 'for_each_sg_dma_page'
   - 'for_each_sg_page'
   - 'for_each_sibling_event'
+  - 'for_each_subelement'
+  - 'for_each_subelement_extid'
+  - 'for_each_subelement_id'
   - '__for_each_thread'
   - 'for_each_thread'
   - 'for_each_zone'
@@ -252,6 +265,8 @@ ForEachMacros:
   - 'fwnode_for_each_child_node'
   - 'fwnode_graph_for_each_endpoint'
   - 'gadget_for_each_ep'
+  - 'genradix_for_each'
+  - 'genradix_for_each_from'
   - 'hash_for_each'
   - 'hash_for_each_possible'
   - 'hash_for_each_possible_rcu'
@@ -293,7 +308,11 @@ ForEachMacros:
   - 'key_for_each'
   - 'key_for_each_safe'
   - 'klp_for_each_func'
+  - 'klp_for_each_func_safe'
+  - 'klp_for_each_func_static'
   - 'klp_for_each_object'
+  - 'klp_for_each_object_safe'
+  - 'klp_for_each_object_static'
   - 'kvm_for_each_memslot'
   - 'kvm_for_each_vcpu'
   - 'list_for_each'
@@ -324,6 +343,8 @@ ForEachMacros:
   - 'media_device_for_each_intf'
   - 'media_device_for_each_link'
   - 'media_device_for_each_pad'
+  - 'mp_bvec_for_each_page'
+  - 'mp_bvec_for_each_segment'
   - 'nanddev_io_for_each_page'
   - 'netdev_for_each_lower_dev'
   - 'netdev_for_each_lower_private'
@@ -375,6 +396,7 @@ ForEachMacros:
   - 'rht_for_each_rcu'
   - 'rht_for_each_rcu_from'
   - '__rq_for_each_bio'
+  - 'rq_for_each_bvec'
   - 'rq_for_each_segment'
   - 'scsi_for_each_prot_sg'
   - 'scsi_for_each_sg'
@@ -410,6 +432,8 @@ ForEachMacros:
   - 'v4l2_m2m_for_each_src_buf_safe'
   - 'virtio_device_for_each_vq'
   - 'xa_for_each'
+  - 'xa_for_each_marked'
+  - 'xa_for_each_start'
   - 'xas_for_each'
   - 'xas_for_each_conflict'
   - 'xas_for_each_marked'
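
The ForEachMacros entries above tell clang-format to treat a macro's
trailing brace block as a loop body rather than as a function call
followed by a stray compound statement. A minimal sketch of the effect,
using a generic stand-in macro rather than one of the kernel iterators
added by this commit:

    #include <stdio.h>

    /* A for-each style macro like those listed in ForEachMacros. */
    #define int_for_each(i, n) for ((i) = 0; (i) < (n); (i)++)

    int main(void)
    {
            int i, sum = 0;

            /* With the macro listed, clang-format indents this block
             * exactly like a for-loop body. */
            int_for_each(i, 4) {
                    sum += i;
            }
            printf("sum = %d\n", sum);  /* sum = 6 */
            return 0;
    }
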
diff --git a/.mailmap b/.mailmap
index 37e1847c798869a62a797840835341c6af48bbb7..a51547ac96f9489d861e5bfac88a74879300d2b5 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -16,6 +16,9 @@ Alan Cox <alan@lxorguk.ukuu.org.uk>
 Alan Cox <root@hraefn.swansea.linux.org.uk>
 Aleksey Gorelov <aleksey_gorelov@phoenix.com>
 Aleksandar Markovic <aleksandar.markovic@mips.com> <aleksandar.markovic@imgtec.com>
+Alexei Starovoitov <ast@kernel.org> <ast@plumgrid.com>
+Alexei Starovoitov <ast@kernel.org> <alexei.starovoitov@gmail.com>
+Alexei Starovoitov <ast@kernel.org> <ast@fb.com>
 Al Viro <viro@ftp.linux.org.uk>
 Al Viro <viro@zenIV.linux.org.uk>
 Andi Shyti <andi@etezian.org> <andi.shyti@samsung.com>
@@ -46,6 +49,12 @@ Christoph Hellwig <hch@lst.de>
 Christophe Ricard <christophe.ricard@gmail.com>
 Corey Minyard <minyard@acm.org>
 Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Daniel Borkmann <daniel@iogearbox.net> <dborkman@redhat.com>
+Daniel Borkmann <daniel@iogearbox.net> <dborkmann@redhat.com>
+Daniel Borkmann <daniel@iogearbox.net> <danborkmann@iogearbox.net>
+Daniel Borkmann <daniel@iogearbox.net> <daniel.borkmann@tik.ee.ethz.ch>
+Daniel Borkmann <daniel@iogearbox.net> <danborkmann@googlemail.com>
+Daniel Borkmann <daniel@iogearbox.net> <dxchgb@gmail.com>
 David Brownell <david-b@pacbell.net>
 David Woodhouse <dwmw2@shinybook.infradead.org>
 Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@mips.com>
@@ -156,6 +165,8 @@ Morten Welinder <welinder@darter.rentec.com>
 Morten Welinder <welinder@troll.com>
 Mythri P K <mythripk@ti.com>
 Nguyen Anh Quynh <aquynh@gmail.com>
+Nicolas Pitre <nico@fluxnic.net> <nicolas.pitre@linaro.org>
+Nicolas Pitre <nico@fluxnic.net> <nico@linaro.org>
 Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
 Patrick Mochel <mochel@digitalimplant.org>
 Paul Burton <paul.burton@mips.com> <paul.burton@imgtec.com>
@@ -224,3 +235,5 @@ Yakir Yang <kuankuan.y@gmail.com> <ykk@rock-chips.com>
 Yusuke Goda <goda.yusuke@renesas.com>
 Gustavo Padovan <gustavo@las.ic.unicamp.br>
 Gustavo Padovan <padovan@profusion.mobi>
+Changbin Du <changbin.du@intel.com> <changbin.du@intel.com>
+Changbin Du <changbin.du@intel.com> <changbin.du@gmail.com>
diff --git a/Documentation/accounting/psi.txt b/Documentation/accounting/psi.txt
index b8ca28b60215a48f1ee99cfae36f20e0b8d0e8da..7e71c9c1d8e9c7eee70ef957610b483a58739232 100644 (file)
@@ -56,12 +56,12 @@ situation from a state where some tasks are stalled but the CPU is
 still doing productive work. As such, time spent in this subset of the
 stall state is tracked separately and exported in the "full" averages.
 
-The ratios are tracked as recent trends over ten, sixty, and three
-hundred second windows, which gives insight into short term events as
-well as medium and long term trends. The total absolute stall time is
-tracked and exported as well, to allow detection of latency spikes
-which wouldn't necessarily make a dent in the time averages, or to
-average trends over custom time frames.
+The ratios (in %) are tracked as recent trends over ten, sixty, and
+three hundred second windows, which gives insight into short term events
+as well as medium and long term trends. The total absolute stall time
+(in us) is tracked and exported as well, to allow detection of latency
+spikes which wouldn't necessarily make a dent in the time averages,
+or to average trends over custom time frames.
 
 Cgroup2 interface
 =================
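
A minimal userspace sketch of reading these averages, assuming the
"some avg10=... avg60=... avg300=... total=..." line format that
psi.txt documents for /proc/pressure/cpu:

    #include <stdio.h>

    int main(void)
    {
            double avg10, avg60, avg300;  /* recent stall ratios, in % */
            unsigned long long total;     /* absolute stall time, in us */
            FILE *f = fopen("/proc/pressure/cpu", "r");

            if (!f)
                    return 1;
            if (fscanf(f, "some avg10=%lf avg60=%lf avg300=%lf total=%llu",
                       &avg10, &avg60, &avg300, &total) == 4)
                    printf("10s window: %.2f%%, total stalled: %llu us\n",
                           avg10, total);
            fclose(f);
            return 0;
    }
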
diff --git a/Documentation/bpf/btf.rst b/Documentation/bpf/btf.rst
index 9a60a5d60e380ab2204334086a58ebef1f3b11e5..29396e6943b036ffa11b0ed5d54cfe35533c48b6 100644 (file)
@@ -82,6 +82,8 @@ sequentially and type id is assigned to each recognized type starting from id
     #define BTF_KIND_RESTRICT       11      /* Restrict     */
     #define BTF_KIND_FUNC           12      /* Function     */
     #define BTF_KIND_FUNC_PROTO     13      /* Function Proto       */
+    #define BTF_KIND_VAR            14      /* Variable     */
+    #define BTF_KIND_DATASEC        15      /* Section      */
 
 Note that the type section encodes debug info, not just pure types.
 ``BTF_KIND_FUNC`` is not a type, and it represents a defined subprogram.
@@ -148,16 +150,16 @@ The ``btf_type.size * 8`` must be equal to or greater than ``BTF_INT_BITS()``
 for the type. The maximum value of ``BTF_INT_BITS()`` is 128.
 
 The ``BTF_INT_OFFSET()`` specifies the starting bit offset to calculate values
-for this int. For example, a bitfield struct member has: * btf member bit
-offset 100 from the start of the structure, * btf member pointing to an int
-type, * the int type has ``BTF_INT_OFFSET() = 2`` and ``BTF_INT_BITS() = 4``
+for this int. For example, a bitfield struct member has:
+ * btf member bit offset 100 from the start of the structure,
+ * btf member pointing to an int type,
+ * the int type has ``BTF_INT_OFFSET() = 2`` and ``BTF_INT_BITS() = 4``
 
 Then in the struct memory layout, this member will occupy ``4`` bits starting
 from bits ``100 + 2 = 102``.
 
 Alternatively, the bitfield struct member can be the following to access the
 same bits as the above:
-
  * btf member bit offset 102,
  * btf member pointing to an int type,
  * the int type has ``BTF_INT_OFFSET() = 0`` and ``BTF_INT_BITS() = 4``
@@ -393,6 +395,61 @@ refers to parameter type.
 If the function has variable arguments, the last parameter is encoded with
 ``name_off = 0`` and ``type = 0``.
 
+2.2.14 BTF_KIND_VAR
+~~~~~~~~~~~~~~~~~~~
+
+``struct btf_type`` encoding requirement:
+  * ``name_off``: offset to a valid C identifier
+  * ``info.kind_flag``: 0
+  * ``info.kind``: BTF_KIND_VAR
+  * ``info.vlen``: 0
+  * ``type``: the type of the variable
+
+``btf_type`` is followed by a single ``struct btf_variable`` with the
+following data::
+
+    struct btf_var {
+        __u32   linkage;
+    };
+
+``struct btf_var`` encoding:
+  * ``linkage``: currently either 0 (static variable) or 1 (globally
+                 allocated variable in ELF sections)
+
+Not all types of global variables are supported by LLVM at this point.
+The following are currently available:
+
+  * static variables with or without section attributes
+  * global variables with section attributes
+
+The latter is for future extraction of map key/value type IDs from a
+map definition.
+
+2.2.15 BTF_KIND_DATASEC
+~~~~~~~~~~~~~~~~~~~~~~~
+
+``struct btf_type`` encoding requirement:
+  * ``name_off``: offset to a valid name associated with a variable or
+                  one of .data/.bss/.rodata
+  * ``info.kind_flag``: 0
+  * ``info.kind``: BTF_KIND_DATASEC
+  * ``info.vlen``: # of variables
+  * ``size``: total section size in bytes (0 at compilation time, patched
+              to actual size by BPF loaders such as libbpf)
+
+``btf_type`` is followed by ``info.vlen`` ``struct btf_var_secinfo`` records::
+
+    struct btf_var_secinfo {
+        __u32   type;
+        __u32   offset;
+        __u32   size;
+    };
+
+``struct btf_var_secinfo`` encoding:
+  * ``type``: the type of the BTF_KIND_VAR variable
+  * ``offset``: the in-section offset of the variable
+  * ``size``: the size of the variable in bytes
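+
+As a hedged sketch of consuming these records (``t`` points at a
+BTF_KIND_DATASEC ``btf_type``; ``BTF_INFO_VLEN()`` is from
+``include/uapi/linux/btf.h``; ``use_var()`` is a placeholder)::
+
+    const struct btf_var_secinfo *vsi = (const void *)(t + 1);
+    __u16 i, vlen = BTF_INFO_VLEN(t->info);
+
+    for (i = 0; i < vlen; i++, vsi++)
+        /* vsi->type refers to a BTF_KIND_VAR entry placed at
+         * vsi->offset within the section, vsi->size bytes long. */
+        use_var(vsi->type, vsi->offset, vsi->size);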
+
 3. BTF Kernel API
 *****************
 
index 365dcf384d73922a22bc70a32e98444122bc4fa7..82dd7582e945461efbdff77c8222bdc1e0e162b9 100644 (file)
@@ -228,7 +228,7 @@ patternProperties:
                 - renesas,r9a06g032-smp
                 - rockchip,rk3036-smp
                 - rockchip,rk3066-smp
-               - socionext,milbeaut-m10v-smp
+                - socionext,milbeaut-m10v-smp
                 - ste,dbx500-smp
 
       cpu-release-addr:
index 08bab0e94d25a21b8ed4d87738641e5ffe74bfa2..d0ae46d7bac370d9db369d9e471ef632be2482d9 100644 (file)
@@ -26,7 +26,7 @@ Required node properties:
 
 Optional node properties:
 
- - ti,mode:     Operation mode (see above).
+ - ti,mode:     Operation mode (u8) (see above).
 
 
 Example (operation mode 2):
@@ -34,5 +34,5 @@ Example (operation mode 2):
        adc128d818@1d {
                compatible = "ti,adc128d818";
                reg = <0x1d>;
-               ti,mode = <2>;
+               ti,mode = /bits/ 8 <2>;
        };
diff --git a/Documentation/devicetree/bindings/net/mdio-mux-meson-g12a.txt b/Documentation/devicetree/bindings/net/mdio-mux-meson-g12a.txt
new file mode 100644 (file)
index 0000000..3a96cbe
--- /dev/null
@@ -0,0 +1,48 @@
+Properties for the MDIO bus multiplexer/glue of Amlogic G12a SoC family.
+
+This is a special case of an MDIO bus multiplexer. It allows choosing between
+the internal MDIO bus, leading to the embedded 10/100 PHY, and the external
+MDIO bus.
+
+Required properties in addition to the generic multiplexer properties:
+- compatible : amlogic,g12a-mdio-mux
+- reg: physical address and length of the multiplexer/glue registers
+- clocks: list of clock phandles, one for each entry in clock-names.
+- clock-names: should contain the following:
+  * "pclk"   : peripheral clock.
+  * "clkin0" : platform crytal
+  * "clkin1" : SoC 50MHz MPLL
+
+Example :
+
+mdio_mux: mdio-multiplexer@4c000 {
+       compatible = "amlogic,g12a-mdio-mux";
+       reg = <0x0 0x4c000 0x0 0xa4>;
+       clocks = <&clkc CLKID_ETH_PHY>,
+                <&xtal>,
+                <&clkc CLKID_MPLL_50M>;
+       clock-names = "pclk", "clkin0", "clkin1";
+       mdio-parent-bus = <&mdio0>;
+       #address-cells = <1>;
+       #size-cells = <0>;
+
+       ext_mdio: mdio@0 {
+               reg = <0>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+       };
+
+       int_mdio: mdio@1 {
+               reg = <1>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               internal_ephy: ethernet-phy@8 {
+                       compatible = "ethernet-phy-id0180.3301",
+                                    "ethernet-phy-ieee802.3-c22";
+                       interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
+                       reg = <8>;
+                       max-speed = <100>;
+               };
+       };
+};
index 742cb470595ba4d7e2a3e467328d33a8bf5335b5..bcfb13194f16364b0ac77a4391a26c0bb34206d0 100644 (file)
@@ -16,6 +16,7 @@ Required properties:
   * "mediatek,mt8127-uart" for MT8127 compatible UARTS
   * "mediatek,mt8135-uart" for MT8135 compatible UARTS
   * "mediatek,mt8173-uart" for MT8173 compatible UARTS
+  * "mediatek,mt8183-uart", "mediatek,mt6577-uart" for MT8183 compatible UARTS
   * "mediatek,mt6577-uart" for MT6577 and all of the above
 
 - reg: The base address of the UART register bank.
index 944d1965e917e9a91496637ab484d5197d68223d..00ff0cfccfa71cdce0d02ddd8608cf962ee99308 100644 (file)
@@ -12,11 +12,13 @@ CONTENTS
 
  (4) Filesystem context security.
 
- (5) VFS filesystem context operations.
+ (5) VFS filesystem context API.
 
- (6) Parameter description.
+ (6) Superblock creation helpers.
 
- (7) Parameter helper functions.
+ (7) Parameter description.
+
+ (8) Parameter helper functions.
 
 
 ========
@@ -41,12 +43,15 @@ The creation of new mounts is now to be done in a multistep process:
 
  (7) Destroy the context.
 
-To support this, the file_system_type struct gains a new field:
+To support this, the file_system_type struct gains two new fields:
 
        int (*init_fs_context)(struct fs_context *fc);
+       const struct fs_parameter_description *parameters;
 
-which is invoked to set up the filesystem-specific parts of a filesystem
-context, including the additional space.
+The first is invoked to set up the filesystem-specific parts of a filesystem
+context, including the additional space, and the second points to the
+parameter description for validation at registration time and querying by a
+future system call.
 
 Note that security initialisation is done *after* the filesystem is called so
 that the namespaces may be adjusted first.
@@ -73,9 +78,9 @@ context.  This is represented by the fs_context structure:
                void                    *s_fs_info;
                unsigned int            sb_flags;
                unsigned int            sb_flags_mask;
+               unsigned int            s_iflags;
+               unsigned int            lsm_flags;
                enum fs_context_purpose purpose:8;
-               bool                    sloppy:1;
-               bool                    silent:1;
                ...
        };
 
@@ -141,6 +146,10 @@ The fs_context fields are as follows:
 
      Which bits SB_* flags are to be set/cleared in super_block::s_flags.
 
+ (*) unsigned int s_iflags
+
+     These will be bitwise-OR'd with s->s_iflags when a superblock is created.
+
  (*) enum fs_context_purpose
 
      This indicates the purpose for which the context is intended.  The
@@ -150,17 +159,6 @@ The fs_context fields are as follows:
        FS_CONTEXT_FOR_SUBMOUNT         -- New automatic submount of extant mount
        FS_CONTEXT_FOR_RECONFIGURE      -- Change an existing mount
 
- (*) bool sloppy
- (*) bool silent
-
-     These are set if the sloppy or silent mount options are given.
-
-     [NOTE] sloppy is probably unnecessary when userspace passes over one
-     option at a time since the error can just be ignored if userspace deems it
-     to be unimportant.
-
-     [NOTE] silent is probably redundant with sb_flags & SB_SILENT.
-
 The mount context is created by calling vfs_new_fs_context() or
 vfs_dup_fs_context() and is destroyed with put_fs_context().  Note that the
 structure is not refcounted.
@@ -342,28 +340,47 @@ number of operations used by the new mount code for this purpose:
      It should return 0 on success or a negative error code on failure.
 
 
-=================================
-VFS FILESYSTEM CONTEXT OPERATIONS
-=================================
+==========================
+VFS FILESYSTEM CONTEXT API
+==========================
 
-There are four operations for creating a filesystem context and
-one for destroying a context:
+There are four operations for creating a filesystem context and one for
+destroying a context:
 
- (*) struct fs_context *vfs_new_fs_context(struct file_system_type *fs_type,
-                                          struct dentry *reference,
-                                          unsigned int sb_flags,
-                                          unsigned int sb_flags_mask,
-                                          enum fs_context_purpose purpose);
+ (*) struct fs_context *fs_context_for_mount(
+               struct file_system_type *fs_type,
+               unsigned int sb_flags);
 
-     Create a filesystem context for a given filesystem type and purpose.  This
-     allocates the filesystem context, sets the superblock flags, initialises
-     the security and calls fs_type->init_fs_context() to initialise the
-     filesystem private data.
+     Allocate a filesystem context for the purpose of setting up a new mount,
+     whether that be with a new superblock or sharing an existing one.  This
+     sets the superblock flags, initialises the security and calls
+     fs_type->init_fs_context() to initialise the filesystem private data.
 
-     reference can be NULL or it may indicate the root dentry of a superblock
-     that is going to be reconfigured (FS_CONTEXT_FOR_RECONFIGURE) or
-     the automount point that triggered a submount (FS_CONTEXT_FOR_SUBMOUNT).
-     This is provided as a source of namespace information.
+     fs_type specifies the filesystem type that will manage the context and
+     sb_flags presets the superblock flags stored therein.
+
+ (*) struct fs_context *fs_context_for_reconfigure(
+               struct dentry *dentry,
+               unsigned int sb_flags,
+               unsigned int sb_flags_mask);
+
+     Allocate a filesystem context for the purpose of reconfiguring an
+     existing superblock.  dentry provides a reference to the superblock to be
+     configured.  sb_flags and sb_flags_mask indicate which superblock flags
+     need changing and to what.
+
+ (*) struct fs_context *fs_context_for_submount(
+               struct file_system_type *fs_type,
+               struct dentry *reference);
+
+     Allocate a filesystem context for the purpose of creating a new mount for
+     an automount point or other derived superblock.  fs_type specifies the
+     filesystem type that will manage the context and the reference dentry
+     supplies the parameters.  Namespaces are also propagated from the
+     reference dentry's superblock.
+
+     Note that it's not a requirement that the reference dentry be of the same
+     filesystem type as fs_type.
 
  (*) struct fs_context *vfs_dup_fs_context(struct fs_context *src_fc);
 
@@ -390,20 +407,6 @@ context pointer or a negative error code.
 For the remaining operations, if an error occurs, a negative error code will be
 returned.
 
- (*) int vfs_get_tree(struct fs_context *fc);
-
-     Get or create the mountable root and superblock, using the parameters in
-     the filesystem context to select/configure the superblock.  This invokes
-     the ->validate() op and then the ->get_tree() op.
-
-     [NOTE] ->validate() could perhaps be rolled into ->get_tree() and
-     ->reconfigure().
-
- (*) struct vfsmount *vfs_create_mount(struct fs_context *fc);
-
-     Create a mount given the parameters in the specified filesystem context.
-     Note that this does not attach the mount to anything.
-
  (*) int vfs_parse_fs_param(struct fs_context *fc,
                            struct fs_parameter *param);
 
@@ -432,17 +435,80 @@ returned.
      clear the pointer, but then becomes responsible for disposing of the
      object.
 
- (*) int vfs_parse_fs_string(struct fs_context *fc, char *key,
+ (*) int vfs_parse_fs_string(struct fs_context *fc, const char *key,
                             const char *value, size_t v_size);
 
-     A wrapper around vfs_parse_fs_param() that just passes a constant string.
+     A wrapper around vfs_parse_fs_param() that copies the value string it is
+     passed.
 
  (*) int generic_parse_monolithic(struct fs_context *fc, void *data);
 
      Parse a sys_mount() data page, assuming the form to be a text list
      consisting of key[=val] options separated by commas.  Each item in the
      list is passed to vfs_mount_option().  This is the default when the
-     ->parse_monolithic() operation is NULL.
+     ->parse_monolithic() method is NULL.
+
+ (*) int vfs_get_tree(struct fs_context *fc);
+
+     Get or create the mountable root and superblock, using the parameters in
+     the filesystem context to select/configure the superblock.  This invokes
+     the ->get_tree() method.
+
+ (*) struct vfsmount *vfs_create_mount(struct fs_context *fc);
+
+     Create a mount given the parameters in the specified filesystem context.
+     Note that this does not attach the mount to anything.
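+
+     As a hedged sketch of how these calls fit together for a simple
+     in-kernel mount (error handling trimmed; foo_fs_type and the "source"
+     value are purely illustrative):
+
+	struct fs_context *fc;
+	struct vfsmount *mnt = ERR_PTR(-EINVAL);
+
+	fc = fs_context_for_mount(&foo_fs_type, 0);
+	if (IS_ERR(fc))
+		return ERR_CAST(fc);
+
+	if (!vfs_parse_fs_string(fc, "source", "none", 4) &&
+	    !vfs_get_tree(fc))
+		mnt = vfs_create_mount(fc);
+
+	put_fs_context(fc);
+	return mnt;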
+
+
+===========================
+SUPERBLOCK CREATION HELPERS
+===========================
+
+A number of VFS helpers are available for use by filesystems for the creation
+or looking up of superblocks.
+
+ (*) struct super_block *
+     sget_fc(struct fs_context *fc,
+            int (*test)(struct super_block *sb, struct fs_context *fc),
+            int (*set)(struct super_block *sb, struct fs_context *fc));
+
+     This is the core routine.  If test is non-NULL, it searches for an
+     existing superblock matching the criteria held in the fs_context, using
+     the test function to match them.  If no match is found, a new superblock
+     is created and the set function is called to set it up.
+
+     Prior to the set function being called, fc->s_fs_info will be transferred
+     to sb->s_fs_info - and fc->s_fs_info will be cleared if set returns
+     success (i.e. 0).
+
+The following helpers all wrap sget_fc():
+
+ (*) int vfs_get_super(struct fs_context *fc,
+                      enum vfs_get_super_keying keying,
+                      int (*fill_super)(struct super_block *sb,
+                                        struct fs_context *fc))
+
+     This creates/looks up a deviceless superblock.  The keying indicates how
+     many superblocks of this type may exist and in what manner they may be
+     shared:
+
+       (1) vfs_get_single_super
+
+           Only one such superblock may exist in the system.  Any further
+           attempt to get a new superblock gets this one (and any parameter
+           differences are ignored).
+
+       (2) vfs_get_keyed_super
+
+           Multiple superblocks of this type may exist and they're keyed on
+           their s_fs_info pointer (for example this may refer to a
+           namespace).
+
+       (3) vfs_get_independent_super
+
+           Multiple independent superblocks of this type may exist.  This
+           function never matches an existing one and always creates a new
+           one.
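+
+     As a hedged sketch, a deviceless filesystem's ->get_tree() method might
+     use this helper as follows (the foofs_* names are illustrative):
+
+	static int foofs_get_tree(struct fs_context *fc)
+	{
+		/* Every mount gets its own, independent superblock. */
+		return vfs_get_super(fc, vfs_get_independent_super,
+				     foofs_fill_super);
+	}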
 
 
 =====================
@@ -454,35 +520,22 @@ There's a core description struct that links everything together:
 
        struct fs_parameter_description {
                const char      name[16];
-               u8              nr_params;
-               u8              nr_alt_keys;
-               u8              nr_enums;
-               bool            ignore_unknown;
-               bool            no_source;
-               const char *const *keys;
-               const struct constant_table *alt_keys;
                const struct fs_parameter_spec *specs;
                const struct fs_parameter_enum *enums;
        };
 
 For example:
 
-       enum afs_param {
+       enum {
                Opt_autocell,
                Opt_bar,
                Opt_dyn,
                Opt_foo,
                Opt_source,
-               nr__afs_params
        };
 
        static const struct fs_parameter_description afs_fs_parameters = {
                .name           = "kAFS",
-               .nr_params      = nr__afs_params,
-               .nr_alt_keys    = ARRAY_SIZE(afs_param_alt_keys),
-               .nr_enums       = ARRAY_SIZE(afs_param_enums),
-               .keys           = afs_param_keys,
-               .alt_keys       = afs_param_alt_keys,
                .specs          = afs_param_specs,
                .enums          = afs_param_enums,
        };
@@ -494,28 +547,24 @@ The members are as follows:
      The name to be used in error messages generated by the parse helper
      functions.
 
- (2) u8 nr_params;
-
-     The number of discrete parameter identifiers.  This indicates the number
-     of elements in the ->types[] array and also limits the values that may be
-     used in the values that the ->keys[] array maps to.
-
-     It is expected that, for example, two parameters that are related, say
-     "acl" and "noacl" with have the same ID, but will be flagged to indicate
-     that one is the inverse of the other.  The value can then be picked out
-     from the parse result.
+ (2) const struct fs_parameter_spec *specs;
 
- (3) const struct fs_parameter_specification *specs;
+     Table of parameter specifications, terminated with a null entry, where the
+     entries are of type:
 
-     Table of parameter specifications, where the entries are of type:
-
-       struct fs_parameter_type {
-               enum fs_parameter_spec  type:8;
-               u8                      flags;
+       struct fs_parameter_spec {
+               const char              *name;
+               u8                      opt;
+               enum fs_parameter_type  type:8;
+               unsigned short          flags;
        };
 
-     and the parameter identifier is the index to the array.  'type' indicates
-     the desired value type and must be one of:
+     The 'name' field is a string that is matched exactly against the
+     parameter key (no wildcards or patterns, and no case-independence) and
+     'opt' is the value that will be returned by the fs_parse() function in
+     the case of a successful match.
+
+     The 'type' field indicates the desired value type and must be one of:
 
        TYPE NAME               EXPECTED VALUE          RESULT IN
        ======================= ======================= =====================
@@ -525,85 +574,65 @@ The members are as follows:
        fs_param_is_u32_octal   32-bit octal int        result->uint_32
        fs_param_is_u32_hex     32-bit hex int          result->uint_32
        fs_param_is_s32         32-bit signed int       result->int_32
+       fs_param_is_u64         64-bit unsigned int     result->uint_64
        fs_param_is_enum        Enum value name         result->uint_32
        fs_param_is_string      Arbitrary string        param->string
        fs_param_is_blob        Binary blob             param->blob
        fs_param_is_blockdev    Blockdev path           * Needs lookup
        fs_param_is_path        Path                    * Needs lookup
-       fs_param_is_fd          File descriptor         param->file
-
-     And each parameter can be qualified with 'flags':
-
-       fs_param_v_optional     The value is optional
-       fs_param_neg_with_no    If key name is prefixed with "no", it is false
-       fs_param_neg_with_empty If value is "", it is false
-       fs_param_deprecated     The parameter is deprecated.
-
-     For example:
-
-       static const struct fs_parameter_spec afs_param_specs[nr__afs_params] = {
-               [Opt_autocell]  = { fs_param_is flag },
-               [Opt_bar]       = { fs_param_is_enum },
-               [Opt_dyn]       = { fs_param_is flag },
-               [Opt_foo]       = { fs_param_is_bool, fs_param_neg_with_no },
-               [Opt_source]    = { fs_param_is_string },
-       };
+       fs_param_is_fd          File descriptor         result->int_32
 
      Note that if the value is of fs_param_is_bool type, fs_parse() will try
      to match any string value against "0", "1", "no", "yes", "false", "true".
 
-     [!] NOTE that the table must be sorted according to primary key name so
-        that ->keys[] is also sorted.
-
- (4) const char *const *keys;
-
-     Table of primary key names for the parameters.  There must be one entry
-     per defined parameter.  The table is optional if ->nr_params is 0.  The
-     table is just an array of names e.g.:
+     Each parameter can also be qualified with 'flags':
 
-       static const char *const afs_param_keys[nr__afs_params] = {
-               [Opt_autocell]  = "autocell",
-               [Opt_bar]       = "bar",
-               [Opt_dyn]       = "dyn",
-               [Opt_foo]       = "foo",
-               [Opt_source]    = "source",
-       };
-
-     [!] NOTE that the table must be sorted such that the table can be searched
-        with bsearch() using strcmp().  This means that the Opt_* values must
-        correspond to the entries in this table.
-
- (5) const struct constant_table *alt_keys;
-     u8 nr_alt_keys;
-
-     Table of additional key names and their mappings to parameter ID plus the
-     number of elements in the table.  This is optional.  The table is just an
-     array of { name, integer } pairs, e.g.:
+       fs_param_v_optional     The value is optional
+       fs_param_neg_with_no    result->negated set if key is prefixed with "no"
+       fs_param_neg_with_empty result->negated set if value is ""
+       fs_param_deprecated     The parameter is deprecated.
 
-       static const struct constant_table afs_param_keys[] = {
-               { "baz",        Opt_bar },
-               { "dynamic",    Opt_dyn },
+     These are wrapped with a number of convenience wrappers:
+
+       MACRO                   SPECIFIES
+       ======================= ===============================================
+       fsparam_flag()          fs_param_is_flag
+       fsparam_flag_no()       fs_param_is_flag, fs_param_neg_with_no
+       fsparam_bool()          fs_param_is_bool
+       fsparam_u32()           fs_param_is_u32
+       fsparam_u32oct()        fs_param_is_u32_octal
+       fsparam_u32hex()        fs_param_is_u32_hex
+       fsparam_s32()           fs_param_is_s32
+       fsparam_u64()           fs_param_is_u64
+       fsparam_enum()          fs_param_is_enum
+       fsparam_string()        fs_param_is_string
+       fsparam_blob()          fs_param_is_blob
+       fsparam_bdev()          fs_param_is_blockdev
+       fsparam_path()          fs_param_is_path
+       fsparam_fd()            fs_param_is_fd
+
+     all of which take two arguments, a name string and an option number - for
+     example:
+
+       static const struct fs_parameter_spec afs_param_specs[] = {
+               fsparam_flag    ("autocell",    Opt_autocell),
+               fsparam_flag    ("dyn",         Opt_dyn),
+               fsparam_string  ("source",      Opt_source),
+               fsparam_flag_no ("foo",         Opt_foo),
+               {}
        };
 
-     [!] NOTE that the table must be sorted such that strcmp() can be used with
-        bsearch() to search the entries.
-
-     The parameter ID can also be fs_param_key_removed to indicate that a
-     deprecated parameter has been removed and that an error will be given.
-     This differs from fs_param_deprecated where the parameter may still have
-     an effect.
-
-     Further, the behaviour of the parameter may differ when an alternate name
-     is used (for instance with NFS, "v3", "v4.2", etc. are alternate names).
+     An additional macro, __fsparam(), is provided that takes an extra pair
+     of arguments to specify the type and the flags for anything that doesn't
+     match one of the above macros.
 
  (6) const struct fs_parameter_enum *enums;
-     u8 nr_enums;
 
-     Table of enum value names to integer mappings and the number of elements
-     stored therein.  This is of type:
+     Table of enum value names to integer mappings, terminated with a null
+     entry.  This is of type:
 
        struct fs_parameter_enum {
-               u8              param_id;
+               u8              opt;
                char            name[14];
                u8              value;
        };
@@ -621,11 +650,6 @@ The members are as follows:
      try to look the value up in the enum table and the result will be stored
      in the parse result.
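+
+     For example (the option and values are illustrative):
+
+	static const struct fs_parameter_enum afs_param_enums[] = {
+		{ Opt_bar,	"fast",		1 },
+		{ Opt_bar,	"slow",		2 },
+		{}
+	};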
 
- (7) bool no_source;
-
-     If this is set, fs_parse() will ignore any "source" parameter and not
-     pass it to the filesystem.
-
 The parser should be pointed to by the parser pointer in the file_system_type
 struct as this will provide validation on registration (if
 CONFIG_VALIDATE_FS_PARSER=y) and will allow the description to be queried from
@@ -650,9 +674,8 @@ process the parameters it is given.
                int             value;
        };
 
-     and it must be sorted such that it can be searched using bsearch() using
-     strcmp().  If a match is found, the corresponding value is returned.  If a
-     match isn't found, the not_found value is returned instead.
+     If a match is found, the corresponding value is returned.  If a match
+     isn't found, the not_found value is returned instead.
 
  (*) bool validate_constant_table(const struct constant_table *tbl,
                                  size_t tbl_size,
@@ -665,36 +688,36 @@ process the parameters it is given.
      should just be set to lie inside the low-to-high range.
 
      If all is good, true is returned.  If the table is invalid, errors are
-     logged to dmesg, the stack is dumped and false is returned.
+     logged to dmesg and false is returned.
+
+ (*) bool fs_validate_description(const struct fs_parameter_description *desc);
+
+     This performs some validation checks on a parameter description.  It
+     returns true if the description is good and false if it is not.  It will
+     log errors to dmesg if validation fails.
 
  (*) int fs_parse(struct fs_context *fc,
-                 const struct fs_param_parser *parser,
+                 const struct fs_parameter_description *desc,
                  struct fs_parameter *param,
-                 struct fs_param_parse_result *result);
+                 struct fs_parse_result *result);
 
      This is the main interpreter of parameters.  It uses the parameter
-     description (parser) to look up the name of the parameter to use and to
-     convert that to a parameter ID (stored in result->key).
+     description to look up a parameter by key name and to convert that to an
+     option number (which it returns).
 
      If successful, and if the parameter type indicates the result is a
      boolean, integer or enum type, the value is converted by this function and
-     the result stored in result->{boolean,int_32,uint_32}.
+     the result stored in result->{boolean,int_32,uint_32,uint_64}.
 
      If a match isn't initially made, the key is prefixed with "no" and no
      value is present, then an attempt will be made to look up the key with the
      prefix removed.  If this matches a parameter for which the type has flag
-     fs_param_neg_with_no set, then a match will be made and the value will be
-     set to false/0/NULL.
-
-     If the parameter is successfully matched and, optionally, parsed
-     correctly, 1 is returned.  If the parameter isn't matched and
-     parser->ignore_unknown is set, then 0 is returned.  Otherwise -EINVAL is
-     returned.
-
- (*) bool fs_validate_description(const struct fs_parameter_description *desc);
+     fs_param_neg_with_no set, then a match will be made and result->negated
+     will be set to true.
 
-     This is validates the parameter description.  It returns true if the
-     description is good and false if it is not.
+     If the parameter isn't matched, -ENOPARAM will be returned; if the
+     parameter is matched, but the value is erroneous, -EINVAL will be
+     returned; otherwise the parameter's option number will be returned.
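+
+     As a hedged sketch, a filesystem's parameter-parsing hook might use it
+     as follows (afs_parse_param is illustrative; afs_fs_parameters is the
+     description from the example above):
+
+	static int afs_parse_param(struct fs_context *fc,
+				   struct fs_parameter *param)
+	{
+		struct fs_parse_result result;
+		int opt;
+
+		opt = fs_parse(fc, &afs_fs_parameters, param, &result);
+		if (opt < 0)
+			return opt;
+
+		switch (opt) {
+		case Opt_source:
+			/* param/result now hold the parsed value. */
+			break;
+		}
+		return 0;
+	}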
 
  (*) int fs_lookup_param(struct fs_context *fc,
                         struct fs_parameter *value,
index d1ee484a787d1b476cf13bcf7d7b53ac084fb63e..ee9984f3586897c870bd42b854f5d883b245621e 100644 (file)
@@ -36,6 +36,7 @@ Supported adapters:
   * Intel Cannon Lake (PCH)
   * Intel Cedar Fork (PCH)
   * Intel Ice Lake (PCH)
+  * Intel Comet Lake (PCH)
    Datasheets: Publicly available at the Intel website
 
 On Intel Patsburg and later chipsets, both the normal host SMBus controller
index f79934225d8d35bd98d3ebd4dcc996324abd66b9..ca983328976bcf36e040ab7508eb29583ed5ca4e 100644 (file)
@@ -102,9 +102,11 @@ Byte sequences
                 dictionary which is empty, and that it will always be
                 invalid at this place.
 
-      17      : bitstream version. If the first byte is 17, the next byte
-                gives the bitstream version (version 1 only). If the first byte
-                is not 17, the bitstream version is 0.
+      17      : bitstream version. If the first byte is 17, and compressed
+                stream length is at least 5 bytes (length of shortest possible
+                versioned bitstream), the next byte gives the bitstream version
+                (version 1 only).
+                Otherwise, the bitstream version is 0 (see the sketch
+                after this list).
 
       18..21  : copy 0..3 literals
                 state = (byte - 17) = 0..3  [ copy <state> literals ]
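+
+A hedged C sketch of the byte-17 version rule above (the function name is
+illustrative):
+
+      static int bitstream_version(const unsigned char *in, size_t in_len)
+      {
+              /* Shortest possible versioned bitstream is 5 bytes. */
+              if (in_len >= 5 && in[0] == 17)
+                      return in[1];   /* version 1 is the only one defined */
+              return 0;
+      }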
diff --git a/Documentation/networking/bpf_flow_dissector.rst b/Documentation/networking/bpf_flow_dissector.rst
new file mode 100644 (file)
index 0000000..b375ae2
--- /dev/null
@@ -0,0 +1,126 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==================
+BPF Flow Dissector
+==================
+
+Overview
+========
+
+The flow dissector is a routine that parses metadata out of packets. It's
+used in various places in the networking subsystem (RFS, flow hash, etc).
+
+The BPF flow dissector is an attempt to reimplement the C-based flow
+dissector logic in BPF to gain the benefits of the BPF verifier (namely,
+limits on the number of instructions and tail calls).
+
+API
+===
+
+BPF flow dissector programs operate on an ``__sk_buff``. However, only a
+limited set of fields is accessible: ``data``, ``data_end`` and ``flow_keys``.
+``flow_keys`` is ``struct bpf_flow_keys`` and contains flow dissector input
+and output arguments.
+
+The inputs are:
+  * ``nhoff`` - initial offset of the networking header
+  * ``thoff`` - initial offset of the transport header, initialized to nhoff
+  * ``n_proto`` - L3 protocol type, parsed out of L2 header
+
+The flow dissector BPF program should fill out the rest of the ``struct
+bpf_flow_keys`` fields. The input arguments ``nhoff/thoff/n_proto`` should
+also be adjusted accordingly.
+
+The return code of the BPF program is either BPF_OK to indicate successful
+dissection, or BPF_DROP to indicate a parsing error.
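+
+As a hedged minimal skeleton of such a program (IPv4 only, assuming a
+20-byte header with no options; the ``bpf_helpers.h``/``bpf_endian.h``
+headers follow the selftests conventions):
+
+.. code:: c
+
+  #include <linux/bpf.h>
+  #include <linux/if_ether.h>
+  #include <linux/ip.h>
+  #include "bpf_helpers.h"
+  #include "bpf_endian.h"
+
+  SEC("flow_dissector")
+  int dissect(struct __sk_buff *skb)
+  {
+          void *data = (void *)(long)skb->data;
+          void *data_end = (void *)(long)skb->data_end;
+          struct bpf_flow_keys *keys = skb->flow_keys;
+          struct iphdr *iph = data + keys->nhoff;
+
+          if (keys->n_proto != bpf_htons(ETH_P_IP))
+                  return BPF_DROP;        /* only IPv4 in this sketch */
+          if ((void *)(iph + 1) > data_end)
+                  return BPF_DROP;
+
+          keys->addr_proto = ETH_P_IP;
+          keys->ipv4_src = iph->saddr;
+          keys->ipv4_dst = iph->daddr;
+          keys->ip_proto = iph->protocol;
+          keys->thoff += sizeof(*iph);    /* no IP options assumed */
+          return BPF_OK;
+  }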
+
+__sk_buff->data
+===============
+
+In the VLAN-less case, this is what the initial state of the BPF flow
+dissector looks like::
+
+  +------+------+------------+-----------+
+  | DMAC | SMAC | ETHER_TYPE | L3_HEADER |
+  +------+------+------------+-----------+
+                              ^
+                              |
+                              +-- flow dissector starts here
+
+
+.. code:: c
+
+  skb->data + flow_keys->nhoff points to the first byte of L3_HEADER
+  flow_keys->thoff = nhoff
+  flow_keys->n_proto = ETHER_TYPE
+
+In the VLAN case, the flow dissector can be called in two different states.
+
+Pre-VLAN parsing::
+
+  +------+------+------+-----+-----------+-----------+
+  | DMAC | SMAC | TPID | TCI |ETHER_TYPE | L3_HEADER |
+  +------+------+------+-----+-----------+-----------+
+                        ^
+                        |
+                        +-- flow dissector starts here
+
+.. code:: c
+
+  skb->data + flow_keys->nhoff points to the first byte of TCI
+  flow_keys->thoff = nhoff
+  flow_keys->n_proto = TPID
+
+Please note that the TPID can be 802.1AD and, hence, the BPF program would
+have to parse VLAN information twice for double-tagged packets.
+
+
+Post-VLAN parsing::
+
+  +------+------+------+-----+-----------+-----------+
+  | DMAC | SMAC | TPID | TCI |ETHER_TYPE | L3_HEADER |
+  +------+------+------+-----+-----------+-----------+
+                                          ^
+                                          |
+                                          +-- flow dissector starts here
+
+.. code:: c
+
+  skb->data + flow_keys->nhoff points to the first byte of L3_HEADER
+  flow_keys->thoff = nhoff
+  flow_keys->n_proto = ETHER_TYPE
+
+In this case VLAN information has been processed before the flow dissector
+and the BPF flow dissector is not required to handle it.
+
+
+The takeaway here is as follows: the BPF flow dissector program can be
+called with an optional VLAN header and should gracefully handle both
+cases: when a single or double VLAN tag is present and when it is not.
+The same program is invoked in either state, so it must be written
+carefully to handle both.
+
+
+Reference Implementation
+========================
+
+See ``tools/testing/selftests/bpf/progs/bpf_flow.c`` for the reference
+implementation and ``tools/testing/selftests/bpf/flow_dissector_load.[hc]``
+for the loader. bpftool can be used to load a BPF flow dissector program as well.
+
+The reference implementation is organized as follows:
+  * ``jmp_table`` map that contains sub-programs for each supported L3 protocol
+  * ``_dissect`` routine - entry point; it parses the input ``n_proto`` and
+    does a ``bpf_tail_call`` to the appropriate L3 handler
+
+Since BPF at this point doesn't support looping (or any jumping back),
+``jmp_table`` is used instead to handle multiple levels of encapsulation (and
+IPv6 options).
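+
+A hedged sketch of that dispatch pattern (the map layout and index values
+are illustrative, using the ``bpf_map_def`` convention from the selftests):
+
+.. code:: c
+
+  struct bpf_map_def SEC("maps") jmp_table = {
+          .type = BPF_MAP_TYPE_PROG_ARRAY,
+          .key_size = sizeof(__u32),
+          .value_size = sizeof(__u32),
+          .max_entries = 8,
+  };
+
+  SEC("flow_dissector")
+  int _dissect(struct __sk_buff *skb)
+  {
+          struct bpf_flow_keys *keys = skb->flow_keys;
+
+          if (keys->n_proto == bpf_htons(ETH_P_IP))
+                  bpf_tail_call(skb, &jmp_table, 0 /* IPv4 slot */);
+
+          return BPF_DROP;  /* tail call failed or unsupported protocol */
+  }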
+
+
+Current Limitations
+===================
+The BPF flow dissector doesn't support exporting all the metadata that the
+in-kernel C-based implementation can export. A notable example is single
+VLAN (802.1Q) and double VLAN (802.1AD) tags. Please refer to ``struct
+bpf_flow_keys`` for the set of information that can currently be exported
+from the BPF context.
index c79ad85933831a257e3b15a8243488d51279f6ff..4316342b77468fa000a83b19c8b36a904185b612 100644 (file)
@@ -41,3 +41,8 @@ fw.ncsi
 
 Version of the software responsible for supporting/handling the
 Network Controller Sideband Interface.
+
+fw.psid
+=======
+
+Unique identifier of the firmware parameter set.
similarity index 83%
rename from Documentation/networking/dsa/bcm_sf2.txt
rename to Documentation/networking/dsa/bcm_sf2.rst
index eba3a2431e914ffb787eaf08d96c16b405f94d51..dee234039e1eccacf2309557dfab8dee910d2f76 100644 (file)
@@ -1,3 +1,4 @@
+=============================================
 Broadcom Starfighter 2 Ethernet switch driver
 =============================================
 
@@ -25,27 +26,27 @@ are connected at a lower speed.
 The switch hardware block is typically interfaced using MMIO accesses and
 contains a bunch of sub-blocks/registers:
 
-* SWITCH_CORE: common switch registers
-* SWITCH_REG: external interfaces switch register
-* SWITCH_MDIO: external MDIO bus controller (there is another one in SWITCH_CORE,
+- ``SWITCH_CORE``: common switch registers
+- ``SWITCH_REG``: external interfaces switch register
+- ``SWITCH_MDIO``: external MDIO bus controller (there is another one in ``SWITCH_CORE``,
   which is used for indirect PHY accesses)
-* SWITCH_INDIR_RW: 64-bits wide register helper block
-* SWITCH_INTRL2_0/1: Level-2 interrupt controllers
-* SWITCH_ACB: Admission control block
-* SWITCH_FCB: Fail-over control block
+- ``SWITCH_INDIR_RW``: 64-bit wide register helper block
+- ``SWITCH_INTRL2_0/1``: Level-2 interrupt controllers
+- ``SWITCH_ACB``: Admission control block
+- ``SWITCH_FCB``: Fail-over control block
 
 Implementation details
 ======================
 
-The driver is located in drivers/net/dsa/bcm_sf2.c and is implemented as a DSA
-driver; see Documentation/networking/dsa/dsa.txt for details on the subsystem
+The driver is located in ``drivers/net/dsa/bcm_sf2.c`` and is implemented as a DSA
+driver; see ``Documentation/networking/dsa/dsa.rst`` for details on the subsystem
 and what it provides.
 
 The SF2 switch is configured to enable a Broadcom specific 4-bytes switch tag
 which gets inserted by the switch for every packet forwarded to the CPU
 interface, conversely, the CPU network interface should insert a similar tag for
 packets entering the CPU port. The tag format is described in
-net/dsa/tag_brcm.c.
+``net/dsa/tag_brcm.c``.
 
 Overall, the SF2 driver is a fairly regular DSA driver; there are a few
 specifics covered below.
@@ -54,7 +55,7 @@ Device Tree probing
 -------------------
 
 The DSA platform device driver is probed using a specific compatible string
-provided in net/dsa/dsa.c. The reason for that is because the DSA subsystem gets
+provided in ``net/dsa/dsa.c``. The reason for that is that the DSA subsystem gets
 registered as a platform device driver currently. DSA will provide the needed
 device_node pointers which are then accessible by the switch driver setup
 function to setup resources such as register ranges and interrupts. This
@@ -70,7 +71,7 @@ Broadcom switches connected to a SF2 require the use of the DSA slave MDIO bus
 in order to properly configure them. By default, the SF2 pseudo-PHY address, and
 an external switch pseudo-PHY address will both be snooping for incoming MDIO
 transactions, since they are at the same address (30), resulting in some kind of
-"double" programming. Using DSA, and setting ds->phys_mii_mask accordingly, we
+"double" programming. Using DSA, and setting ``ds->phys_mii_mask`` accordingly, we
 selectively divert reads and writes towards external Broadcom switches
 pseudo-PHY addresses. Newer revisions of the SF2 hardware have introduced a
 configurable pseudo-PHY address which circumvents the initial design limitation.
@@ -86,7 +87,7 @@ firmware gets reloaded. The SF2 driver relies on such events to properly set its
 MoCA interface carrier state and properly report this to the networking stack.
 
 The MoCA interfaces are supported using the PHY library's fixed PHY/emulated PHY
-device and the switch driver registers a fixed_link_update callback for such
+device and the switch driver registers a ``fixed_link_update`` callback for such
 PHYs which reflects the link state obtained from the interrupt handler.
 
 
similarity index 67%
rename from Documentation/networking/dsa/dsa.txt
rename to Documentation/networking/dsa/dsa.rst
index 43ef767bc4400053f9679d3ba41abbb8eea150a2..ca87068b9ab904a9f9342fdba01d638fef51df8e 100644 (file)
@@ -1,10 +1,8 @@
-Distributed Switch Architecture
-===============================
-
-Introduction
+============
+Architecture
 ============
 
-This document describes the Distributed Switch Architecture (DSA) subsystem
+This document describes the **Distributed Switch Architecture (DSA)** subsystem
 design principles, limitations, interactions with other subsystems, and how to
 develop drivers for this subsystem as well as a TODO for developers interested
 in joining the effort.
@@ -70,11 +68,11 @@ Switch tagging protocols
 DSA currently supports 5 different tagging protocols, and a tag-less mode as
 well. The different protocols are implemented in:
 
-net/dsa/tag_trailer.c: Marvell's 4 trailer tag mode (legacy)
-net/dsa/tag_dsa.c: Marvell's original DSA tag
-net/dsa/tag_edsa.c: Marvell's enhanced DSA tag
-net/dsa/tag_brcm.c: Broadcom's 4 bytes tag
-net/dsa/tag_qca.c: Qualcomm's 2 bytes tag
+- ``net/dsa/tag_trailer.c``: Marvell's 4 trailer tag mode (legacy)
+- ``net/dsa/tag_dsa.c``: Marvell's original DSA tag
+- ``net/dsa/tag_edsa.c``: Marvell's enhanced DSA tag
+- ``net/dsa/tag_brcm.c``: Broadcom's 4-byte tag
+- ``net/dsa/tag_qca.c``: Qualcomm's 2-byte tag
 
 The exact format of the tag protocol is vendor specific, but in general, they
 all contain something which:
@@ -89,7 +87,7 @@ Master network devices are regular, unmodified Linux network device drivers for
 the CPU/management Ethernet interface. Such a driver might occasionally need to
 know whether DSA is enabled (e.g.: to enable/disable specific offload features),
 but the DSA subsystem has been proven to work with industry standard drivers:
-e1000e, mv643xx_eth etc. without having to introduce modifications to these
+``e1000e``, ``mv643xx_eth`` etc. without having to introduce modifications to these
 drivers. Such network devices are also often referred to as conduit network
 devices since they act as a pipe between the host processor and the hardware
 Ethernet switch.
@@ -100,40 +98,42 @@ Networking stack hooks
 When a master netdev is used with DSA, a small hook is placed in the
 networking stack in order to have the DSA subsystem process the Ethernet
 switch specific tagging protocol. DSA accomplishes this by registering a
-specific (and fake) Ethernet type (later becoming skb->protocol) with the
-networking stack, this is also known as a ptype or packet_type. A typical
+specific (and fake) Ethernet type (later becoming ``skb->protocol``) with the
+networking stack; this is also known as a ``ptype`` or ``packet_type``. A typical
 Ethernet Frame receive sequence looks like this:
 
 Master network device (e.g.: e1000e):
 
-Receive interrupt fires:
-- receive function is invoked
-- basic packet processing is done: getting length, status etc.
-- packet is prepared to be processed by the Ethernet layer by calling
-  eth_type_trans
+1. Receive interrupt fires:
+
+        - receive function is invoked
+        - basic packet processing is done: getting length, status etc.
+        - packet is prepared to be processed by the Ethernet layer by calling
+          ``eth_type_trans``
+
+2. net/ethernet/eth.c::
+
+          eth_type_trans(skb, dev)
+                  if (dev->dsa_ptr != NULL)
+                          -> skb->protocol = ETH_P_XDSA
 
-net/ethernet/eth.c:
+3. drivers/net/ethernet/\*::
 
-eth_type_trans(skb, dev)
-       if (dev->dsa_ptr != NULL)
-               -> skb->protocol = ETH_P_XDSA
+          netif_receive_skb(skb)
+                  -> iterate over registered packet_type
+                          -> invoke handler for ETH_P_XDSA, calls dsa_switch_rcv()
 
-drivers/net/ethernet/*:
+4. net/dsa/dsa.c::
 
-netif_receive_skb(skb)
-       -> iterate over registered packet_type
-               -> invoke handler for ETH_P_XDSA, calls dsa_switch_rcv()
+          -> dsa_switch_rcv()
+                  -> invoke switch tag specific protocol handler in 'net/dsa/tag_*.c'
 
-net/dsa/dsa.c:
-       -> dsa_switch_rcv()
-               -> invoke switch tag specific protocol handler in
-                  net/dsa/tag_*.c
+5. net/dsa/tag_*.c:
 
-net/dsa/tag_*.c:
-       -> inspect and strip switch tag protocol to determine originating port
-       -> locate per-port network device
-       -> invoke eth_type_trans() with the DSA slave network device
-       -> invoked netif_receive_skb()
+        - inspect and strip switch tag protocol to determine originating port
+        - locate per-port network device
+        - invoke ``eth_type_trans()`` with the DSA slave network device
+        - invoke ``netif_receive_skb()``
 
 Past this point, the DSA slave network devices get delivered regular Ethernet
 frames that can be processed by the networking stack.
@@ -162,7 +162,7 @@ invoke a specific transmit routine which takes care of adding the relevant
 switch tag in the Ethernet frames.
 
 These frames are then queued for transmission using the master network device
-ndo_start_xmit() function, since they contain the appropriate switch tag, the
+``ndo_start_xmit()`` function. Since they contain the appropriate switch tag, the
 Ethernet switch will be able to process these incoming frames from the
 management interface and deliver these frames to the physical switch port.
 
@@ -170,23 +170,25 @@ Graphical representation
 ------------------------
 
 Summarized, this is basically what DSA looks like from a network device
-perspective:
-
-
-                       |---------------------------
-                       | CPU network device (eth0)|
-                       ----------------------------
-                       | <tag added by switch     |
-                       |                          |
-                       |                          |
-                       |        tag added by CPU> |
-               |--------------------------------------------|
-               | Switch driver                              |
-               |--------------------------------------------|
-                    ||        ||         ||
-               |-------|  |-------|  |-------|
-               | sw0p0 |  | sw0p1 |  | sw0p2 |
-               |-------|  |-------|  |-------|
+perspective::
+
+
+                |---------------------------
+                | CPU network device (eth0)|
+                ----------------------------
+                | <tag added by switch     |
+                |                          |
+                |                          |
+                |        tag added by CPU> |
+        |--------------------------------------------|
+        |            Switch driver                   |
+        |--------------------------------------------|
+                  ||        ||         ||
+              |-------|  |-------|  |-------|
+              | sw0p0 |  | sw0p1 |  | sw0p2 |
+              |-------|  |-------|  |-------|
+
+
 
 Slave MDIO bus
 --------------
@@ -207,31 +209,32 @@ PHYs, external PHYs, or even external switches.
 Data structures
 ---------------
 
-DSA data structures are defined in include/net/dsa.h as well as
-net/dsa/dsa_priv.h.
+DSA data structures are defined in ``include/net/dsa.h`` as well as
+``net/dsa/dsa_priv.h``:
 
-dsa_chip_data: platform data configuration for a given switch device, this
-structure describes a switch device's parent device, its address, as well as
-various properties of its ports: names/labels, and finally a routing table
-indication (when cascading switches)
+- ``dsa_chip_data``: platform data configuration for a given switch device,
+  this structure describes a switch device's parent device, its address, as
+  well as various properties of its ports: names/labels, and finally a routing
+  table indication (when cascading switches)
 
-dsa_platform_data: platform device configuration data which can reference a
-collection of dsa_chip_data structure if multiples switches are cascaded, the
-master network device this switch tree is attached to needs to be referenced
+- ``dsa_platform_data``: platform device configuration data which can reference
+  a collection of dsa_chip_data structures if multiple switches are cascaded,
+  the master network device this switch tree is attached to needs to be
+  referenced
 
-dsa_switch_tree: structure assigned to the master network device under
-"dsa_ptr", this structure references a dsa_platform_data structure as well as
-the tagging protocol supported by the switch tree, and which receive/transmit
-function hooks should be invoked, information about the directly attached switch
-is also provided: CPU port. Finally, a collection of dsa_switch are referenced
-to address individual switches in the tree.
+- ``dsa_switch_tree``: structure assigned to the master network device under
+  ``dsa_ptr``, this structure references a dsa_platform_data structure as well as
+  the tagging protocol supported by the switch tree, and which receive/transmit
+  function hooks should be invoked, information about the directly attached
+  switch is also provided: CPU port. Finally, a collection of ``dsa_switch``
+  structures is referenced to address individual switches in the tree.
 
-dsa_switch: structure describing a switch device in the tree, referencing a
-dsa_switch_tree as a backpointer, slave network devices, master network device,
-and a reference to the backing dsa_switch_ops
+- ``dsa_switch``: structure describing a switch device in the tree, referencing
+  a ``dsa_switch_tree`` as a backpointer, slave network devices, master network
+  device, and a reference to the backing ``dsa_switch_ops``
 
-dsa_switch_ops: structure referencing function pointers, see below for a full
-description.
+- ``dsa_switch_ops``: structure referencing function pointers, see below for a
+  full description.
 
 Design limitations
 ==================
@@ -240,7 +243,7 @@ Limits on the number of devices and ports
 -----------------------------------------
 
 DSA currently limits the maximum number of switches within a tree to 4
-(DSA_MAX_SWITCHES), and the number of ports per switch to 12 (DSA_MAX_PORTS).
+(``DSA_MAX_SWITCHES``), and the number of ports per switch to 12 (``DSA_MAX_PORTS``).
 These limits could be extended to support larger configurations should this need
 arise.
 
@@ -279,15 +282,15 @@ Interactions with other subsystems
 
 DSA currently leverages the following subsystems:
 
-- MDIO/PHY library: drivers/net/phy/phy.c, mdio_bus.c
-- Switchdev: net/switchdev/*
+- MDIO/PHY library: ``drivers/net/phy/phy.c``, ``mdio_bus.c``
+- Switchdev: ``net/switchdev/*``
 - Device Tree for various of_* functions
 
 MDIO/PHY library
 ----------------
 
 Slave network devices exposed by DSA may or may not be interfacing with PHY
-devices (struct phy_device as defined in include/linux/phy.h), but the DSA
+devices (``struct phy_device`` as defined in ``include/linux/phy.h``), but the DSA
 subsystem deals with all possible combinations:
 
 - internal PHY devices, built into the Ethernet switch hardware
@@ -296,16 +299,16 @@ subsystem deals with all possible combinations:
 - special, non-autonegotiated or non MDIO-managed PHY devices: SFPs, MoCA; a.k.a
   fixed PHYs
 
-The PHY configuration is done by the dsa_slave_phy_setup() function and the
+The PHY configuration is done by the ``dsa_slave_phy_setup()`` function and the
 logic basically looks like this:
 
 - if Device Tree is used, the PHY device is looked up using the standard
   "phy-handle" property, if found, this PHY device is created and registered
-  using of_phy_connect()
+  using ``of_phy_connect()``
 
 - if Device Tree is used, and the PHY device is "fixed", that is, conforms to
   the definition of a non-MDIO managed PHY as defined in
-  Documentation/devicetree/bindings/net/fixed-link.txt, the PHY is registered
+  ``Documentation/devicetree/bindings/net/fixed-link.txt``, the PHY is registered
   and connected transparently using the special fixed MDIO bus driver
 
 - finally, if the PHY is built into the switch, as is very common with
@@ -331,8 +334,8 @@ Device Tree
 -----------
 
 DSA features a standardized binding which is documented in
-Documentation/devicetree/bindings/net/dsa/dsa.txt. PHY/MDIO library helper
-functions such as of_get_phy_mode(), of_phy_connect() are also used to query
+``Documentation/devicetree/bindings/net/dsa/dsa.txt``. PHY/MDIO library helper
+functions such as ``of_get_phy_mode()``, ``of_phy_connect()`` are also used to query
 per-port PHY specific details: interface connection, MDIO bus location etc..
 
 Driver development
@@ -341,8 +344,8 @@ Driver development
 DSA switch drivers need to implement a dsa_switch_ops structure which will
 contain the various members described below.
 
-register_switch_driver() registers this dsa_switch_ops in its internal list
-of drivers to probe for. unregister_switch_driver() does the exact opposite.
+``register_switch_driver()`` registers this ``dsa_switch_ops`` in its internal list
+of drivers to probe for. ``unregister_switch_driver()`` does the exact opposite.
 
 Unless requested differently by setting the priv_size member accordingly, DSA
 does not allocate any driver private context space.
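+
+As a hedged skeleton for an imaginary ``foo`` switch (the member names
+follow the sections below; the ``foo_*`` functions are placeholders)::
+
+        static struct dsa_switch_ops foo_switch_ops = {
+                .tag_protocol = DSA_TAG_PROTO_NONE,
+                .probe        = foo_probe,
+                .setup        = foo_setup,
+                .phy_read     = foo_phy_read,
+                .phy_write    = foo_phy_write,
+                .adjust_link  = foo_adjust_link,
+        };
+
+        register_switch_driver(&foo_switch_ops);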
@@ -350,17 +353,17 @@ does not allocate any driver private context space.
 Switch configuration
 --------------------
 
-- tag_protocol: this is to indicate what kind of tagging protocol is supported,
-  should be a valid value from the dsa_tag_protocol enum
+- ``tag_protocol``: this is to indicate what kind of tagging protocol is supported,
+  should be a valid value from the ``dsa_tag_protocol`` enum
 
-- probe: probe routine which will be invoked by the DSA platform device upon
+- ``probe``: probe routine which will be invoked by the DSA platform device upon
   registration to test for the presence/absence of a switch device. For MDIO
   devices, it is recommended to issue a read towards internal registers using
   the switch pseudo-PHY and return whether this is a supported device. For other
   buses, return a non-NULL string
 
-- setup: setup function for the switch, this function is responsible for setting
-  up the dsa_switch_ops private structure with all it needs: register maps,
+- ``setup``: setup function for the switch, this function is responsible for setting
+  up the ``dsa_switch_ops`` private structure with all it needs: register maps,
   interrupts, mutexes, locks etc.. This function is also expected to properly
   configure the switch to separate all network interfaces from each other, that
   is, they should be isolated by the switch hardware itself, typically by creating
@@ -375,27 +378,27 @@ Switch configuration
 PHY devices and link management
 -------------------------------
 
-- get_phy_flags: Some switches are interfaced to various kinds of Ethernet PHYs,
+- ``get_phy_flags``: Some switches are interfaced to various kinds of Ethernet PHYs,
   if the PHY library PHY driver needs to know about information it cannot obtain
   on its own (e.g.: coming from switch memory mapped registers), this function
   should return a 32-bits bitmask of "flags", that is private between the switch
-  driver and the Ethernet PHY driver in drivers/net/phy/*.
+  driver and the Ethernet PHY driver in ``drivers/net/phy/*``.
 
-- phy_read: Function invoked by the DSA slave MDIO bus when attempting to read
+- ``phy_read``: Function invoked by the DSA slave MDIO bus when attempting to read
   the switch port MDIO registers. If unavailable, return 0xffff for each read.
   For builtin switch Ethernet PHYs, this function should allow reading the link
   status, auto-negotiation results, link partner pages etc..
 
-- phy_write: Function invoked by the DSA slave MDIO bus when attempting to write
+- ``phy_write``: Function invoked by the DSA slave MDIO bus when attempting to write
   to the switch port MDIO registers. If unavailable return a negative error
   code.
 
-- adjust_link: Function invoked by the PHY library when a slave network device
+- ``adjust_link``: Function invoked by the PHY library when a slave network device
   is attached to a PHY device. This function is responsible for appropriately
   configuring the switch port link parameters: speed, duplex, pause based on
-  what the phy_device is providing.
+  what the ``phy_device`` is providing.
 
-- fixed_link_update: Function invoked by the PHY library, and specifically by
+- ``fixed_link_update``: Function invoked by the PHY library, and specifically by
   the fixed PHY driver asking the switch driver for link parameters that could
   not be auto-negotiated, or obtained by reading the PHY registers through MDIO.
   This is particularly useful for specific kinds of hardware such as QSGMII,
@@ -405,87 +408,87 @@ PHY devices and link management
 Ethtool operations
 ------------------
 
-- get_strings: ethtool function used to query the driver's strings, will
+- ``get_strings``: ethtool function used to query the driver's strings, will
   typically return statistics strings, private flags strings etc.
 
-- get_ethtool_stats: ethtool function used to query per-port statistics and
+- ``get_ethtool_stats``: ethtool function used to query per-port statistics and
   return their values. DSA overlays slave network devices general statistics:
   RX/TX counters from the network device, with switch driver specific statistics
   per port
 
-- get_sset_count: ethtool function used to query the number of statistics items
+- ``get_sset_count``: ethtool function used to query the number of statistics items
 
-- get_wol: ethtool function used to obtain Wake-on-LAN settings per-port, this
+- ``get_wol``: ethtool function used to obtain Wake-on-LAN settings per-port, this
   function may, for certain implementations also query the master network device
   Wake-on-LAN settings if this interface needs to participate in Wake-on-LAN
 
-- set_wol: ethtool function used to configure Wake-on-LAN settings per-port,
+- ``set_wol``: ethtool function used to configure Wake-on-LAN settings per-port,
   direct counterpart to get_wol with similar restrictions
 
-- set_eee: ethtool function which is used to configure a switch port EEE (Green
+- ``set_eee``: ethtool function which is used to configure a switch port EEE (Green
   Ethernet) settings, can optionally invoke the PHY library to enable EEE at the
   PHY level if relevant. This function should enable EEE at the switch port MAC
   controller and data-processing logic
 
-- get_eee: ethtool function which is used to query a switch port EEE settings,
+- ``get_eee``: ethtool function which is used to query a switch port EEE settings,
   this function should return the EEE state of the switch port MAC controller
   and data-processing logic as well as query the PHY for its currently configured
   EEE settings
 
-- get_eeprom_len: ethtool function returning for a given switch the EEPROM
+- ``get_eeprom_len``: ethtool function returning for a given switch the EEPROM
   length/size in bytes
 
-- get_eeprom: ethtool function returning for a given switch the EEPROM contents
+- ``get_eeprom``: ethtool function returning for a given switch the EEPROM contents
 
-- set_eeprom: ethtool function writing specified data to a given switch EEPROM
+- ``set_eeprom``: ethtool function writing specified data to a given switch EEPROM
 
-- get_regs_len: ethtool function returning the register length for a given
+- ``get_regs_len``: ethtool function returning the register length for a given
   switch
 
-- get_regs: ethtool function returning the Ethernet switch internal register
+- ``get_regs``: ethtool function returning the Ethernet switch internal register
   contents. This function might require user-land code in ethtool to
   pretty-print register values and registers
 
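+The statistics callbacks above might be wired up along these lines (the
+``foo_`` names are hypothetical and exact prototypes vary between kernel
+versions; this is only a sketch)::
+
+    static int foo_get_sset_count(struct dsa_switch *ds, int port, int sset)
+    {
+            return sset == ETH_SS_STATS ? 1 : 0;
+    }
+
+    static void foo_get_strings(struct dsa_switch *ds, int port,
+                                u32 stringset, uint8_t *data)
+    {
+            if (stringset == ETH_SS_STATS)
+                    strncpy(data, "rx_good_frames", ETH_GSTRING_LEN);
+    }
+
+    static void foo_get_ethtool_stats(struct dsa_switch *ds, int port,
+                                      uint64_t *data)
+    {
+            /* foo_read_counter() is a hypothetical register accessor */
+            data[0] = foo_read_counter(ds->priv, port);
+    }
+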
 Power management
 ----------------
 
-- suspend: function invoked by the DSA platform device when the system goes to
+- ``suspend``: function invoked by the DSA platform device when the system goes to
   suspend, should quiesce all Ethernet switch activities, but keep ports
   participating in Wake-on-LAN active as well as additional wake-up logic if
   supported
 
-- resume: function invoked by the DSA platform device when the system resumes,
+- ``resume``: function invoked by the DSA platform device when the system resumes,
   should resume all Ethernet switch activities and re-configure the switch to be
   in a fully active state
 
-- port_enable: function invoked by the DSA slave network device ndo_open
+- ``port_enable``: function invoked by the DSA slave network device ndo_open
   function when a port is administratively brought up; this function should
   fully enable a given switch port. DSA takes care of marking the port with
-  BR_STATE_BLOCKING if the port is a bridge member, or BR_STATE_FORWARDING if it
+  ``BR_STATE_BLOCKING`` if the port is a bridge member, or ``BR_STATE_FORWARDING`` if it
   was not, and propagating these changes down to the hardware
 
-- port_disable: function invoked by the DSA slave network device ndo_close
+- ``port_disable``: function invoked by the DSA slave network device ndo_close
   function when a port is administratively brought down; this function should
   fully disable a given switch port, as sketched below. DSA takes care of
   marking the port with
-  BR_STATE_DISABLED and propagating changes to the hardware if this port is
+  ``BR_STATE_DISABLED`` and propagating changes to the hardware if this port is
   disabled while being a bridge member
 
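+A minimal sketch of the port callbacks (the ``foo_`` helpers and the register
+layout are hypothetical, and exact prototypes vary between kernel versions)::
+
+    static int foo_port_enable(struct dsa_switch *ds, int port,
+                               struct phy_device *phy)
+    {
+            /* hypothetical per-port control register */
+            foo_writel(ds->priv, FOO_PORT_CTRL(port), FOO_PORT_CTRL_EN);
+            return 0;
+    }
+
+    static void foo_port_disable(struct dsa_switch *ds, int port,
+                                 struct phy_device *phy)
+    {
+            foo_writel(ds->priv, FOO_PORT_CTRL(port), 0);
+    }
+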
 Bridge layer
 ------------
 
-- port_bridge_join: bridge layer function invoked when a given switch port is
+- ``port_bridge_join``: bridge layer function invoked when a given switch port is
   added to a bridge, this function should do whatever is necessary at the
   switch level to allow the joining port to be added to the relevant logical
   domain so that it can ingress/egress traffic with other members of the bridge.
 
-- port_bridge_leave: bridge layer function invoked when a given switch port is
+- ``port_bridge_leave``: bridge layer function invoked when a given switch port is
   removed from a bridge, this function should do whatever is necessary at the
   switch level to prevent the leaving port from exchanging ingress/egress
   traffic with the remaining bridge members. When the port leaves the bridge,
   it should be aged out at the switch hardware for the switch to (re)learn the
   MAC addresses behind this port.
 
-- port_stp_state_set: bridge layer function invoked when a given switch port STP
+- ``port_stp_state_set``: bridge layer function invoked when a given switch port STP
   state is computed by the bridge layer and should be propagated to the switch
   hardware to forward/block/learn traffic. The switch driver is responsible for
   computing an STP state change based on current and requested parameters and perform
@@ -494,7 +497,7 @@ Bridge layer
 Bridge VLAN filtering
 ---------------------
 
-- port_vlan_filtering: bridge layer function invoked when the bridge gets
+- ``port_vlan_filtering``: bridge layer function invoked when the bridge gets
   configured for turning on or off VLAN filtering. If nothing specific needs to
   be done at the hardware level, this callback does not need to be implemented.
   When VLAN filtering is turned on, the hardware must be programmed with
@@ -504,61 +507,61 @@ Bridge VLAN filtering
   accept any 802.1Q frames irrespective of their VLAN ID, and untagged frames are
   allowed.
 
-- port_vlan_prepare: bridge layer function invoked when the bridge prepares the
+- ``port_vlan_prepare``: bridge layer function invoked when the bridge prepares the
   configuration of a VLAN on the given port. If the operation is not supported
-  by the hardware, this function should return -EOPNOTSUPP to inform the bridge
+  by the hardware, this function should return ``-EOPNOTSUPP`` to inform the bridge
   code to fall back to a software implementation. No hardware setup must be done
   in this function. See ``port_vlan_add`` for the hardware setup and details.
 
-- port_vlan_add: bridge layer function invoked when a VLAN is configured
+- ``port_vlan_add``: bridge layer function invoked when a VLAN is configured
   (tagged or untagged) for the given switch port
 
-- port_vlan_del: bridge layer function invoked when a VLAN is removed from the
+- ``port_vlan_del``: bridge layer function invoked when a VLAN is removed from the
   given switch port
 
-- port_vlan_dump: bridge layer function invoked with a switchdev callback
+- ``port_vlan_dump``: bridge layer function invoked with a switchdev callback
   function that the driver has to call for each VLAN the given port is a member
   of. A switchdev object is used to carry the VID and bridge flags.
 
-- port_fdb_add: bridge layer function invoked when the bridge wants to install a
+- ``port_fdb_add``: bridge layer function invoked when the bridge wants to install a
   Forwarding Database entry, the switch hardware should be programmed with the
   specified address in the specified VLAN ID in the forwarding database
   associated with this VLAN ID. If the operation is not supported, this
-  function should return -EOPNOTSUPP to inform the bridge code to fallback to
+  function should return ``-EOPNOTSUPP`` to inform the bridge code to fall back to
   a software implementation.
 
-Note: VLAN ID 0 corresponds to the port private database, which, in the context
-of DSA, would be the its port-based VLAN, used by the associated bridge device.
+.. note:: VLAN ID 0 corresponds to the port private database, which, in the context
+        of DSA, would be its port-based VLAN, used by the associated bridge device.
 
-- port_fdb_del: bridge layer function invoked when the bridge wants to remove a
+- ``port_fdb_del``: bridge layer function invoked when the bridge wants to remove a
   Forwarding Database entry, the switch hardware should be programmed to delete
   the specified MAC address from the specified VLAN ID if it was mapped into
   this port forwarding database
 
-- port_fdb_dump: bridge layer function invoked with a switchdev callback
+- ``port_fdb_dump``: bridge layer function invoked with a switchdev callback
   function that the driver has to call for each MAC address known to be behind
   the given port. A switchdev object is used to carry the VID and FDB info.
 
-- port_mdb_prepare: bridge layer function invoked when the bridge prepares the
+- ``port_mdb_prepare``: bridge layer function invoked when the bridge prepares the
   installation of a multicast database entry. If the operation is not supported,
-  this function should return -EOPNOTSUPP to inform the bridge code to fallback
+  this function should return ``-EOPNOTSUPP`` to inform the bridge code to fall back
   to a software implementation. No hardware setup must be done in this function.
-  See port_fdb_add for this and details.
+  See ``port_fdb_add`` for the hardware setup and details; a sketch of this
+  pattern follows this list.
 
-- port_mdb_add: bridge layer function invoked when the bridge wants to install
+- ``port_mdb_add``: bridge layer function invoked when the bridge wants to install
   a multicast database entry, the switch hardware should be programmed with the
   specified address in the specified VLAN ID in the forwarding database
   associated with this VLAN ID.
 
-Note: VLAN ID 0 corresponds to the port private database, which, in the context
-of DSA, would be the its port-based VLAN, used by the associated bridge device.
+.. note:: VLAN ID 0 corresponds to the port private database, which, in the context
+        of DSA, would be its port-based VLAN, used by the associated bridge device.
 
-- port_mdb_del: bridge layer function invoked when the bridge wants to remove a
+- ``port_mdb_del``: bridge layer function invoked when the bridge wants to remove a
   multicast database entry, the switch hardware should be programmed to delete
   the specified MAC address from the specified VLAN ID if it was mapped into
   this port forwarding database.
 
-- port_mdb_dump: bridge layer function invoked with a switchdev callback
+- ``port_mdb_dump``: bridge layer function invoked with a switchdev callback
   function that the driver has to call for each MAC address known to be behind
   the given port. A switchdev object is used to carry the VID and MDB info.
 
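+The prepare/commit split and the ``-EOPNOTSUPP`` fallback used by the VLAN,
+FDB and MDB callbacks can be sketched as follows (the ``foo_`` names and the
+VID limit are hypothetical)::
+
+    static int foo_port_vlan_prepare(struct dsa_switch *ds, int port,
+                                     const struct switchdev_obj_port_vlan *vlan)
+    {
+            if (vlan->vid_end > FOO_MAX_VID)
+                    /* bridge code falls back to a software implementation */
+                    return -EOPNOTSUPP;
+            return 0;   /* foo_port_vlan_add() will do the hardware setup */
+    }
+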
@@ -577,7 +580,7 @@ two subsystems and get the best of both worlds.
 Other low-hanging fruit
 -----------------------
 
-- making the number of ports fully dynamic and not dependent on DSA_MAX_PORTS
+- making the number of ports fully dynamic and not dependent on ``DSA_MAX_PORTS``
 - allowing more than one CPU/management interface:
   http://comments.gmane.org/gmane.linux.network/365657
 - porting more drivers from other vendors:
diff --git a/Documentation/networking/dsa/index.rst b/Documentation/networking/dsa/index.rst
new file mode 100644 (file)
index 0000000..5c488d3
--- /dev/null
@@ -0,0 +1,10 @@
+===============================
+Distributed Switch Architecture
+===============================
+
+.. toctree::
+   :maxdepth: 1
+
+   dsa
+   bcm_sf2
+   lan9303
similarity index 85%
rename from Documentation/networking/dsa/lan9303.txt
rename to Documentation/networking/dsa/lan9303.rst
index 144b02b95207eeac5590f949447e9f8b5812edf5..e3c820db28ad587f4587edacfd9fd37743a98fb5 100644 (file)
@@ -1,3 +1,4 @@
+==============================
 LAN9303 Ethernet switch driver
 ==============================
 
@@ -9,10 +10,9 @@ host master network interface (e.g. fixed link).
 Driver details
 ==============
 
-The driver is implemented as a DSA driver, see
-Documentation/networking/dsa/dsa.txt.
+The driver is implemented as a DSA driver; see ``Documentation/networking/dsa/dsa.rst``.
 
-See Documentation/devicetree/bindings/net/dsa/lan9303.txt for device tree
+See ``Documentation/devicetree/bindings/net/dsa/lan9303.txt`` for device tree
 binding.
 
 The LAN9303 can be managed via both MDIO and I2C, both of which are supported by this driver.
index 5449149be496fa8448fa5b74bafe2c5c796cb06d..269d6f2661d5865d6848970b068124c9b658e822 100644 (file)
@@ -9,6 +9,7 @@ Contents:
    netdev-FAQ
    af_xdp
    batman-adv
+   bpf_flow_dissector
    can
    can_ucan_protocol
    device_drivers/freescale/dpaa2/index
@@ -24,6 +25,7 @@ Contents:
    device_drivers/intel/i40e
    device_drivers/intel/iavf
    device_drivers/intel/ice
+   dsa/index
    devlink-info-versions
    ieee802154
    kapi
index 2df5894353d6954f5c0dd26d2c149c6e9ee6ee4c..cd7303d7fa25dac9ae38d0e73186f3687b7872a7 100644 (file)
@@ -1009,16 +1009,18 @@ The kernel interface functions are as follows:
 
  (*) Check call still alive.
 
-       u32 rxrpc_kernel_check_life(struct socket *sock,
-                                   struct rxrpc_call *call);
+       bool rxrpc_kernel_check_life(struct socket *sock,
+                                    struct rxrpc_call *call,
+                                    u32 *_life);
        void rxrpc_kernel_probe_life(struct socket *sock,
                                     struct rxrpc_call *call);
 
-     The first function returns a number that is updated when ACKs are received
-     from the peer (notably including PING RESPONSE ACKs which we can elicit by
-     sending PING ACKs to see if the call still exists on the server).  The
-     caller should compare the numbers of two calls to see if the call is still
-     alive after waiting for a suitable interval.
+     The first function passes back in *_life a number that is updated when
+     ACKs are received from the peer (notably including PING RESPONSE ACKs
+     which we can elicit by sending PING ACKs to see if the call still exists
+     on the server).  The caller should compare the numbers of two calls to see
+     if the call is still alive after waiting for a suitable interval.  It also
+     returns true as long as the call hasn't yet reached the completed state.
 
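+     For instance, assuming sock and call are already set up, a caller might
+     poll like this (illustrative only):
+
+	u32 life;
+	bool alive = rxrpc_kernel_check_life(sock, call, &life);
+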
      This allows the caller to work out if the server is still contactable and
      if the call is still alive on the server while waiting for the server to
index 7de9eee73fcd9d533aec2c1bc88d413f6216db73..67068c47c591a5ce8fc373313d46f434863ef54b 100644 (file)
@@ -5,25 +5,32 @@ The Definitive KVM (Kernel-based Virtual Machine) API Documentation
 ----------------------
 
 The kvm API is a set of ioctls that are issued to control various aspects
-of a virtual machine.  The ioctls belong to three classes
+of a virtual machine.  The ioctls belong to the following classes:
 
  - System ioctls: These query and set global attributes which affect the
    whole kvm subsystem.  In addition a system ioctl is used to create
-   virtual machines
+   virtual machines.
 
  - VM ioctls: These query and set attributes that affect an entire virtual
    machine, for example memory layout.  In addition a VM ioctl is used to
-   create virtual cpus (vcpus).
+   create virtual cpus (vcpus) and devices.
 
-   Only run VM ioctls from the same process (address space) that was used
-   to create the VM.
+   VM ioctls must be issued from the same process (address space) that was
+   used to create the VM.
 
  - vcpu ioctls: These query and set attributes that control the operation
    of a single virtual cpu.
 
-   Only run vcpu ioctls from the same thread that was used to create the
-   vcpu.
+   vcpu ioctls should be issued from the same thread that was used to create
+   the vcpu, except for asynchronous vcpu ioctls that are marked as such in
+   the documentation.  Otherwise, the first ioctl after switching threads
+   could see a performance impact.
 
+ - device ioctls: These query and set attributes that control the operation
+   of a single device.
+
+   device ioctls must be issued from the same process (address space) that
+   was used to create the VM.
 
 2. File descriptors
 -------------------
@@ -32,17 +39,34 @@ The kvm API is centered around file descriptors.  An initial
 open("/dev/kvm") obtains a handle to the kvm subsystem; this handle
 can be used to issue system ioctls.  A KVM_CREATE_VM ioctl on this
 handle will create a VM file descriptor which can be used to issue VM
-ioctls.  A KVM_CREATE_VCPU ioctl on a VM fd will create a virtual cpu
-and return a file descriptor pointing to it.  Finally, ioctls on a vcpu
-fd can be used to control the vcpu, including the important task of
-actually running guest code.
+ioctls.  A KVM_CREATE_VCPU or KVM_CREATE_DEVICE ioctl on a VM fd will
+create a virtual cpu or device and return a file descriptor pointing to
+the new resource.  Finally, ioctls on a vcpu or device fd can be used
+to control the vcpu or device.  For vcpus, this includes the important
+task of actually running guest code.
 
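+As an illustration, a minimal sequence creating a VM and a vcpu looks like
+this (error handling omitted):
+
+	int kvm = open("/dev/kvm", O_RDWR);
+	int vm = ioctl(kvm, KVM_CREATE_VM, 0);		/* machine type 0 */
+	int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);	/* vcpu id 0 */
+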
 In general file descriptors can be migrated among processes by means
 of fork() and the SCM_RIGHTS facility of unix domain socket.  These
 kinds of tricks are explicitly not supported by kvm.  While they will
 not cause harm to the host, their actual behavior is not guaranteed by
-the API.  The only supported use is one virtual machine per process,
-and one vcpu per thread.
+the API.  See "General description" for details on the ioctl usage
+model that is supported by KVM.
+
+It is important to note that although VM ioctls may only be issued from
+the process that created the VM, a VM's lifecycle is associated with its
+file descriptor, not its creator (process).  In other words, the VM and
+its resources, *including the associated address space*, are not freed
+until the last reference to the VM's file descriptor has been released.
+For example, if fork() is issued after ioctl(KVM_CREATE_VM), the VM will
+not be freed until both the parent (original) process and its child have
+put their references to the VM's file descriptor.
+
+Because a VM's resources are not freed until the last reference to its
+file descriptor is released, creating additional references to a VM
+via fork(), dup(), etc. without careful consideration is strongly
+discouraged and may have unwanted side effects, e.g. memory allocated
+by and on behalf of the VM's process may not be freed/unaccounted when
+the VM is shut down.
 
 
 It is important to note that although VM ioctls may only be issued from
@@ -515,11 +539,15 @@ c) KVM_INTERRUPT_SET_LEVEL
 Note that any value for 'irq' other than the ones stated above is invalid
 and incurs unexpected behavior.
 
+This is an asynchronous vcpu ioctl and can be invoked from any thread.
+
 MIPS:
 
 Queues an external interrupt to be injected into the virtual CPU. A negative
 interrupt number dequeues the interrupt.
 
+This is an asynchronous vcpu ioctl and can be invoked from any thread.
+
 
 4.17 KVM_DEBUG_GUEST
 
@@ -1086,14 +1114,12 @@ struct kvm_userspace_memory_region {
 #define KVM_MEM_LOG_DIRTY_PAGES        (1UL << 0)
 #define KVM_MEM_READONLY       (1UL << 1)
 
-This ioctl allows the user to create or modify a guest physical memory
-slot.  When changing an existing slot, it may be moved in the guest
-physical memory space, or its flags may be modified.  It may not be
-resized.  Slots may not overlap in guest physical address space.
-Bits 0-15 of "slot" specifies the slot id and this value should be
-less than the maximum number of user memory slots supported per VM.
-The maximum allowed slots can be queried using KVM_CAP_NR_MEMSLOTS,
-if this capability is supported by the architecture.
+This ioctl allows the user to create, modify or delete a guest physical
+memory slot.  Bits 0-15 of "slot" specify the slot id and this value
+should be less than the maximum number of user memory slots supported per
+VM.  The maximum allowed slots can be queried using KVM_CAP_NR_MEMSLOTS,
+if this capability is supported by the architecture.  Slots may not
+overlap in guest physical address space.
 
 If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 of "slot"
 specifies the address space which is being modified.  They must be
@@ -1102,6 +1128,10 @@ KVM_CAP_MULTI_ADDRESS_SPACE capability.  Slots in separate address spaces
 are unrelated; the restriction on overlapping slots only applies within
 each address space.
 
+Deleting a slot is done by passing zero for memory_size.  When changing
+an existing slot, it may be moved in the guest physical memory space,
+or its flags may be modified, but it may not be resized.
+
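+For example, a slot created earlier at guest physical address 0x100000
+(values here are hypothetical) can be deleted like this:
+
+	struct kvm_userspace_memory_region region = {
+		.slot = 0,
+		.guest_phys_addr = 0x100000,
+		.memory_size = 0,	/* zero size deletes the slot */
+	};
+
+	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
+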
 Memory for the region is taken starting at the address denoted by the
 field userspace_addr, which must point at user addressable memory for
 the entire memory slot size.  Any object may back this memory, including
@@ -2493,7 +2523,7 @@ KVM_S390_MCHK (vm, vcpu) - machine check interrupt; cr 14 bits in parm,
                            machine checks needing further payload are not
                            supported by this ioctl)
 
-Note that the vcpu ioctl is asynchronous to vcpu execution.
+This is an asynchronous vcpu ioctl and can be invoked from any thread.
 
 4.78 KVM_PPC_GET_HTAB_FD
 
@@ -3042,8 +3072,7 @@ KVM_S390_INT_EMERGENCY - sigp emergency; parameters in .emerg
 KVM_S390_INT_EXTERNAL_CALL - sigp external call; parameters in .extcall
 KVM_S390_MCHK - machine check interrupt; parameters in .mchk
 
-
-Note that the vcpu ioctl is asynchronous to vcpu execution.
+This is an asynchronous vcpu ioctl and can be invoked from any thread.
 
 4.94 KVM_S390_GET_IRQ_STATE
 
index f365102c80f5dd64133cbe60a8a4fd76fc86393d..2efe0efc516e1624e89f3e60def8551bf10c05e7 100644 (file)
@@ -142,7 +142,7 @@ Shadow pages contain the following information:
     If clear, this page corresponds to a guest page table denoted by the gfn
     field.
   role.quadrant:
-    When role.cr4_pae=0, the guest uses 32-bit gptes while the host uses 64-bit
+    When role.gpte_is_8_bytes=0, the guest uses 32-bit gptes while the host uses 64-bit
     sptes.  That means a guest page table contains more ptes than the host,
     so multiple shadow pages are needed to shadow one guest page.
     For first-level shadow pages, role.quadrant can be 0 or 1 and denotes the
@@ -158,9 +158,9 @@ Shadow pages contain the following information:
     The page is invalid and should not be used.  It is a root page that is
     currently pinned (by a cpu hardware register pointing to it); once it is
     unpinned it will be destroyed.
-  role.cr4_pae:
-    Contains the value of cr4.pae for which the page is valid (e.g. whether
-    32-bit or 64-bit gptes are in use).
+  role.gpte_is_8_bytes:
+    Reflects the size of the guest PTE for which the page is valid, i.e. '1'
+    if 64-bit gptes are in use, '0' if 32-bit gptes are in use.
   role.nxe:
     Contains the value of efer.nxe for which the page is valid.
   role.cr0_wp:
@@ -173,6 +173,9 @@ Shadow pages contain the following information:
     Contains the value of cr4.smap && !cr0.wp for which the page is valid
     (pages for which this is true are different from other pages; see the
     treatment of cr0.wp=0 below).
+  role.ept_sp:
+    This is a virtual flag to denote a shadowed nested EPT page.  ept_sp
+    is true if "cr0_wp && smap_andnot_wp", an otherwise invalid combination.
   role.smm:
     Is 1 if the page is valid in system management mode.  This field
     determines which of the kvm_memslots array was used to build this
index c1e2f4070aa56b55254bcc2253809064d5590553..72dfb80e87216498fecfa30e60e21920f3ee388f 100644 (file)
@@ -1893,14 +1893,15 @@ T:      git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-nomadik.git
 ARM/NUVOTON NPCM ARCHITECTURE
 M:     Avi Fishman <avifishman70@gmail.com>
 M:     Tomer Maimon <tmaimon77@gmail.com>
+M:     Tali Perry <tali.perry1@gmail.com>
 R:     Patrick Venture <venture@google.com>
 R:     Nancy Yuen <yuenn@google.com>
-R:     Brendan Higgins <brendanhiggins@google.com>
+R:     Benjamin Fair <benjaminfair@google.com>
 L:     openbmc@lists.ozlabs.org (moderated for non-subscribers)
 S:     Supported
 F:     arch/arm/mach-npcm/
 F:     arch/arm/boot/dts/nuvoton-npcm*
-F:     include/dt-bindings/clock/nuvoton,npcm7xx-clks.h
+F:     include/dt-bindings/clock/nuvoton,npcm7xx-clock.h
 F:     drivers/*/*npcm*
 F:     Documentation/devicetree/bindings/*/*npcm*
 F:     Documentation/devicetree/bindings/*/*/*npcm*
@@ -2356,7 +2357,7 @@ F:        arch/arm/mm/cache-uniphier.c
 F:     arch/arm64/boot/dts/socionext/uniphier*
 F:     drivers/bus/uniphier-system-bus.c
 F:     drivers/clk/uniphier/
-F:     drivers/dmaengine/uniphier-mdmac.c
+F:     drivers/dma/uniphier-mdmac.c
 F:     drivers/gpio/gpio-uniphier.c
 F:     drivers/i2c/busses/i2c-uniphier*
 F:     drivers/irqchip/irq-uniphier-aidet.c
@@ -4132,7 +4133,7 @@ F:        drivers/cpuidle/*
 F:     include/linux/cpuidle.h
 
 CRAMFS FILESYSTEM
-M:     Nicolas Pitre <nico@linaro.org>
+M:     Nicolas Pitre <nico@fluxnic.net>
 S:     Maintained
 F:     Documentation/filesystems/cramfs.txt
 F:     fs/cramfs/
@@ -5836,7 +5837,7 @@ L:        netdev@vger.kernel.org
 S:     Maintained
 F:     Documentation/ABI/testing/sysfs-bus-mdio
 F:     Documentation/devicetree/bindings/net/mdio*
-F:     Documentation/networking/phy.txt
+F:     Documentation/networking/phy.rst
 F:     drivers/net/phy/
 F:     drivers/of/of_mdio.c
 F:     drivers/of/of_net.c
@@ -6411,7 +6412,6 @@ L:        linux-kernel@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking/core
 S:     Maintained
 F:     kernel/futex.c
-F:     kernel/futex_compat.c
 F:     include/asm-generic/futex.h
 F:     include/linux/futex.h
 F:     include/uapi/linux/futex.h
@@ -7519,7 +7519,7 @@ F:        include/net/mac802154.h
 F:     include/net/af_ieee802154.h
 F:     include/net/cfg802154.h
 F:     include/net/ieee802154_netdev.h
-F:     Documentation/networking/ieee802154.txt
+F:     Documentation/networking/ieee802154.rst
 
 IFE PROTOCOL
 M:     Yotam Gigi <yotam.gi@gmail.com>
@@ -10139,7 +10139,7 @@ F:      drivers/spi/spi-at91-usart.c
 F:     Documentation/devicetree/bindings/mfd/atmel-usart.txt
 
 MICROCHIP KSZ SERIES ETHERNET SWITCH DRIVER
-M:     Woojung Huh <Woojung.Huh@microchip.com>
+M:     Woojung Huh <woojung.huh@microchip.com>
 M:     Microchip Linux Driver Support <UNGLinuxDriver@microchip.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
@@ -13976,7 +13976,7 @@ F:      drivers/media/rc/serial_ir.c
 SFC NETWORK DRIVER
 M:     Solarflare linux maintainers <linux-net-drivers@solarflare.com>
 M:     Edward Cree <ecree@solarflare.com>
-M:     Bert Kenward <bkenward@solarflare.com>
+M:     Martin Habets <mhabets@solarflare.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/sfc/
@@ -16503,7 +16503,7 @@ F:      drivers/char/virtio_console.c
 F:     include/linux/virtio_console.h
 F:     include/uapi/linux/virtio_console.h
 
-VIRTIO CORE, NET AND BLOCK DRIVERS
+VIRTIO CORE AND NET DRIVERS
 M:     "Michael S. Tsirkin" <mst@redhat.com>
 M:     Jason Wang <jasowang@redhat.com>
 L:     virtualization@lists.linux-foundation.org
@@ -16518,6 +16518,19 @@ F:     include/uapi/linux/virtio_*.h
 F:     drivers/crypto/virtio/
 F:     mm/balloon_compaction.c
 
+VIRTIO BLOCK AND SCSI DRIVERS
+M:     "Michael S. Tsirkin" <mst@redhat.com>
+M:     Jason Wang <jasowang@redhat.com>
+R:     Paolo Bonzini <pbonzini@redhat.com>
+R:     Stefan Hajnoczi <stefanha@redhat.com>
+L:     virtualization@lists.linux-foundation.org
+S:     Maintained
+F:     drivers/block/virtio_blk.c
+F:     drivers/scsi/virtio_scsi.c
+F:     include/uapi/linux/virtio_blk.h
+F:     include/uapi/linux/virtio_scsi.h
+F:     drivers/vhost/scsi.c
+
 VIRTIO CRYPTO DRIVER
 M:     Gonglei <arei.gonglei@huawei.com>
 L:     virtualization@lists.linux-foundation.org
index c0a34064c5744e44ca696b3dd5bdb2ea809384e0..6355f82461f41f18e75505dfbb731ea2cb2546bd 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 1
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc5
 NAME = Shy Crocodile
 
 # *DOCUMENTATION*
@@ -31,26 +31,12 @@ _all:
 # descending is started. They are now explicitly listed as the
 # prepare rule.
 
-# Ugly workaround for Debian make-kpkg:
-# make-kpkg directly includes the top Makefile of Linux kernel. In such a case,
-# skip sub-make to support debian_* targets in ruleset/kernel_version.mk, but
-# displays warning to discourage such abusage.
-ifneq ($(word 2, $(MAKEFILE_LIST)),)
-$(warning Do not include top Makefile of Linux Kernel)
-sub-make-done := 1
-MAKEFLAGS += -rR
-endif
-
-ifneq ($(sub-make-done),1)
+ifneq ($(sub_make_done),1)
 
 # Do not use make's built-in rules and variables
 # (this increases performance and avoids hard-to-debug behaviour)
 MAKEFLAGS += -rR
 
-# 'MAKEFLAGS += -rR' does not become immediately effective for old
-# GNU Make versions. Cancel implicit rules for this Makefile.
-$(lastword $(MAKEFILE_LIST)): ;
-
 # Avoid funny character set dependencies
 unexport LC_ALL
 LC_COLLATE=C
@@ -153,6 +139,7 @@ $(if $(KBUILD_OUTPUT),, \
 # 'sub-make' below.
 MAKEFLAGS += --include-dir=$(CURDIR)
 
+need-sub-make := 1
 else
 
 # Do not print "Entering directory ..." at all for in-tree build.
@@ -160,6 +147,18 @@ MAKEFLAGS += --no-print-directory
 
 endif # ifneq ($(KBUILD_OUTPUT),)
 
+ifneq ($(filter 3.%,$(MAKE_VERSION)),)
+# 'MAKEFLAGS += -rR' does not immediately become effective for GNU Make 3.x
+# We need to invoke sub-make to avoid implicit rules in the top Makefile.
+need-sub-make := 1
+# Cancel implicit rules for this Makefile.
+$(lastword $(MAKEFILE_LIST)): ;
+endif
+
+export sub_make_done := 1
+
+ifeq ($(need-sub-make),1)
+
 PHONY += $(MAKECMDGOALS) sub-make
 
 $(filter-out _all sub-make $(CURDIR)/Makefile, $(MAKECMDGOALS)) _all: sub-make
@@ -167,12 +166,15 @@ $(filter-out _all sub-make $(CURDIR)/Makefile, $(MAKECMDGOALS)) _all: sub-make
 
 # Invoke a second make in the output directory, passing relevant variables
 sub-make:
-       $(Q)$(MAKE) sub-make-done=1 \
+       $(Q)$(MAKE) \
        $(if $(KBUILD_OUTPUT),-C $(KBUILD_OUTPUT) KBUILD_SRC=$(CURDIR)) \
        -f $(CURDIR)/Makefile $(filter-out _all sub-make,$(MAKECMDGOALS))
 
-else # sub-make-done
+endif # need-sub-make
+endif # sub_make_done
+
 # We process the rest of the Makefile if this is the final invocation of make
+ifeq ($(need-sub-make),)
 
 # Do not print "Entering directory ...",
 # but we want to display it when entering to the output directory
@@ -399,6 +401,7 @@ NM          = $(CROSS_COMPILE)nm
 STRIP          = $(CROSS_COMPILE)strip
 OBJCOPY                = $(CROSS_COMPILE)objcopy
 OBJDUMP                = $(CROSS_COMPILE)objdump
+PAHOLE         = pahole
 LEX            = flex
 YACC           = bison
 AWK            = awk
@@ -453,7 +456,7 @@ KBUILD_LDFLAGS :=
 GCC_PLUGINS_CFLAGS :=
 
 export ARCH SRCARCH CONFIG_SHELL HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE AS LD CC
-export CPP AR NM STRIP OBJCOPY OBJDUMP KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS
+export CPP AR NM STRIP OBJCOPY OBJDUMP PAHOLE KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS
 export MAKE LEX YACC AWK INSTALLKERNEL PERL PYTHON PYTHON2 PYTHON3 UTS_MACHINE
 export HOSTCXX KBUILD_HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS
 
@@ -497,7 +500,8 @@ outputmakefile:
 ifneq ($(KBUILD_SRC),)
        $(Q)ln -fsn $(srctree) source
        $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkmakefile $(srctree)
-       $(Q){ echo "# this is build directory, ignore it"; echo "*"; } > .gitignore
+       $(Q)test -e .gitignore || \
+       { echo "# this is build directory, ignore it"; echo "*"; } > .gitignore
 endif
 
 ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),)
@@ -677,7 +681,7 @@ KBUILD_CFLAGS       += $(call cc-disable-warning, format-overflow)
 KBUILD_CFLAGS  += $(call cc-disable-warning, int-in-bool-context)
 
 ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
-KBUILD_CFLAGS  += $(call cc-option,-Oz,-Os)
+KBUILD_CFLAGS  += -Os
 else
 KBUILD_CFLAGS   += -O2
 endif
@@ -950,9 +954,11 @@ mod_sign_cmd = true
 endif
 export mod_sign_cmd
 
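+# Prefer the linker flags that pkg-config reports for libelf; fall back to
+# plain -lelf when pkg-config or the libelf.pc file is unavailable.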
+HOST_LIBELF_LIBS = $(shell pkg-config libelf --libs 2>/dev/null || echo -lelf)
+
 ifdef CONFIG_STACK_VALIDATION
   has_libelf := $(call try-run,\
-               echo "int main() {}" | $(HOSTCC) -xc -o /dev/null -lelf -,1,0)
+               echo "int main() {}" | $(HOSTCC) -xc -o /dev/null $(HOST_LIBELF_LIBS) -,1,0)
   ifeq ($(has_libelf),1)
     objtool_target := tools/objtool FORCE
   else
@@ -1757,7 +1763,7 @@ existing-targets := $(wildcard $(sort $(targets)))
 
 endif   # ifeq ($(config-targets),1)
 endif   # ifeq ($(mixed-targets),1)
-endif   # sub-make-done
+endif   # need-sub-make
 
 PHONY += FORCE
 FORCE:
index dc0ab28baca14b5a2eb0aa0064255d023421612c..70b783333965e875a7cb4f0a110327cea46cd663 100644 (file)
@@ -6,6 +6,7 @@ generic-y += exec.h
 generic-y += export.h
 generic-y += fb.h
 generic-y += irq_work.h
+generic-y += kvm_para.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
 generic-y += preempt.h
diff --git a/arch/alpha/include/uapi/asm/kvm_para.h b/arch/alpha/include/uapi/asm/kvm_para.h
deleted file mode 100644 (file)
index baacc49..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#include <asm-generic/kvm_para.h>
index b41f8881ecc811f2005b763958e779a42da15c36..decc306a3b52c2b96c2e7af7108e7aa171c51c31 100644 (file)
@@ -11,6 +11,7 @@ generic-y += hardirq.h
 generic-y += hw_irq.h
 generic-y += irq_regs.h
 generic-y += irq_work.h
+generic-y += kvm_para.h
 generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
index 29de098043064a20112dd7ffd02f86059261ae44..c7a4201ed62ba70f9f37275475be215b0e7fb6d1 100644 (file)
@@ -55,12 +55,11 @@ syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
  */
 static inline void
 syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
-                     unsigned int i, unsigned int n, unsigned long *args)
+                     unsigned long *args)
 {
        unsigned long *inside_ptregs = &(regs->r0);
-       inside_ptregs -= i;
-
-       BUG_ON((i + n) > 6);
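+	/* copy all six syscall arguments out of pt_regs */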
+       unsigned int n = 6;
+       unsigned int i = 0;
 
        while (n--) {
                args[i++] = (*inside_ptregs);
index 755bb11323d8feb92d12349bf444f151cd8acc29..1c72f04ff75da1a7f6918f00b14116a183a79313 100644 (file)
@@ -1,2 +1 @@
-generic-y += kvm_para.h
 generic-y += ucontext.h
index 054ead960f983a99a9f241ce1427fe0e1cd6cb8a..850b4805e2d171436e539b326867d6ce08a6f9d6 100644 (file)
@@ -596,6 +596,7 @@ config ARCH_DAVINCI
        select HAVE_IDE
        select PM_GENERIC_DOMAINS if PM
        select PM_GENERIC_DOMAINS_OF if PM && OF
+       select REGMAP_MMIO
        select RESET_CONTROLLER
        select SPARSE_IRQ
        select USE_OF
index dce5be5df97bd91abe3ff039e8befab58656124b..edcff79879e780e5aa307dfc0d18f393663a7f78 100644 (file)
@@ -57,6 +57,24 @@ wlan_en_reg: fixedregulator2 {
                enable-active-high;
        };
 
+       /* TPS79501 */
+       v1_8d_reg: fixedregulator-v1_8d {
+               compatible = "regulator-fixed";
+               regulator-name = "v1_8d";
+               vin-supply = <&vbat>;
+               regulator-min-microvolt = <1800000>;
+               regulator-max-microvolt = <1800000>;
+       };
+
+       /* TPS79501 */
+       v3_3d_reg: fixedregulator-v3_3d {
+               compatible = "regulator-fixed";
+               regulator-name = "v3_3d";
+               vin-supply = <&vbat>;
+               regulator-min-microvolt = <3300000>;
+               regulator-max-microvolt = <3300000>;
+       };
+
        matrix_keypad: matrix_keypad0 {
                compatible = "gpio-matrix-keypad";
                debounce-delay-ms = <5>;
@@ -499,10 +517,10 @@ tlv320aic3106: tlv320aic3106@1b {
                status = "okay";
 
                /* Regulators */
-               AVDD-supply = <&vaux2_reg>;
-               IOVDD-supply = <&vaux2_reg>;
-               DRVDD-supply = <&vaux2_reg>;
-               DVDD-supply = <&vbat>;
+               AVDD-supply = <&v3_3d_reg>;
+               IOVDD-supply = <&v3_3d_reg>;
+               DRVDD-supply = <&v3_3d_reg>;
+               DVDD-supply = <&v1_8d_reg>;
        };
 };
 
index b128998097ce7180cb2a72291bb83ea2a19d0f52..2c2d8b5b8cf52bf55b28b20a47488363c895681c 100644 (file)
@@ -73,6 +73,24 @@ vtt_fixed: fixedregulator3 {
                enable-active-high;
        };
 
+       /* TPS79518 */
+       v1_8d_reg: fixedregulator-v1_8d {
+               compatible = "regulator-fixed";
+               regulator-name = "v1_8d";
+               vin-supply = <&vbat>;
+               regulator-min-microvolt = <1800000>;
+               regulator-max-microvolt = <1800000>;
+       };
+
+       /* TPS78633 */
+       v3_3d_reg: fixedregulator-v3_3d {
+               compatible = "regulator-fixed";
+               regulator-name = "v3_3d";
+               vin-supply = <&vbat>;
+               regulator-min-microvolt = <3300000>;
+               regulator-max-microvolt = <3300000>;
+       };
+
        leds {
                pinctrl-names = "default";
                pinctrl-0 = <&user_leds_s0>;
@@ -501,10 +519,10 @@ tlv320aic3106: tlv320aic3106@1b {
                status = "okay";
 
                /* Regulators */
-               AVDD-supply = <&vaux2_reg>;
-               IOVDD-supply = <&vaux2_reg>;
-               DRVDD-supply = <&vaux2_reg>;
-               DVDD-supply = <&vbat>;
+               AVDD-supply = <&v3_3d_reg>;
+               IOVDD-supply = <&v3_3d_reg>;
+               DRVDD-supply = <&v3_3d_reg>;
+               DVDD-supply = <&v1_8d_reg>;
        };
 };
 
index f459ec316a22d4cd723d43dd97d3709106aacedd..ca6d9f02a800c8a0e042d43280fa762ebce6fef4 100644 (file)
@@ -1762,7 +1762,7 @@ target-module@cc000 {                     /* 0x481cc000, ap 60 46.0 */
                        reg = <0xcc000 0x4>;
                        reg-names = "rev";
                        /* Domains (P, C): per_pwrdm, l4ls_clkdm */
-                       clocks = <&l4ls_clkctrl AM3_D_CAN0_CLKCTRL 0>;
+                       clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN0_CLKCTRL 0>;
                        clock-names = "fck";
                        #address-cells = <1>;
                        #size-cells = <1>;
@@ -1785,7 +1785,7 @@ target-module@d0000 {                     /* 0x481d0000, ap 62 42.0 */
                        reg = <0xd0000 0x4>;
                        reg-names = "rev";
                        /* Domains (P, C): per_pwrdm, l4ls_clkdm */
-                       clocks = <&l4ls_clkctrl AM3_D_CAN1_CLKCTRL 0>;
+                       clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN1_CLKCTRL 0>;
                        clock-names = "fck";
                        #address-cells = <1>;
                        #size-cells = <1>;
index 5641d162dfdb0c106eed6f7f4dc4f7c120930970..28e7513ce61713a084bc5f91f96cc2426d3f50a8 100644 (file)
@@ -93,7 +93,7 @@ i2s_alt2: i2s_alt2 {
 };
 
 &hdmi {
-       hpd-gpios = <&gpio 46 GPIO_ACTIVE_LOW>;
+       hpd-gpios = <&gpio 46 GPIO_ACTIVE_HIGH>;
 };
 
 &pwm {
index b715ab0fa1ffc09c24e101b4f506b9f9bb900550..e8d800fec63790925701a460afa8415c9706d8dc 100644 (file)
@@ -114,9 +114,9 @@ phy_port3: phy@2 {
                        reg = <2>;
                };
 
-               switch@0 {
+               switch@10 {
                        compatible = "qca,qca8334";
-                       reg = <0>;
+                       reg = <10>;
 
                        switch_ports: ports {
                                #address-cells = <1>;
@@ -125,7 +125,7 @@ switch_ports: ports {
                                ethphy0: port@0 {
                                        reg = <0>;
                                        label = "cpu";
-                                       phy-mode = "rgmii";
+                                       phy-mode = "rgmii-id";
                                        ethernet = <&fec>;
 
                                        fixed-link {
index 1d1b4bd0670ffd094d2939ed9c91095d8ae8ba39..a4217f564a5347a568830e2032dd3fac2ae1c80f 100644 (file)
@@ -264,7 +264,7 @@ &usdhc3 {
        pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
        vmcc-supply = <&reg_sd3_vmmc>;
        cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
-       bus-witdh = <4>;
+       bus-width = <4>;
        no-1-8-v;
        status = "okay";
 };
@@ -275,7 +275,7 @@ &usdhc4 {
        pinctrl-1 = <&pinctrl_usdhc4_100mhz>;
        pinctrl-2 = <&pinctrl_usdhc4_200mhz>;
        vmcc-supply = <&reg_sd4_vmmc>;
-       bus-witdh = <8>;
+       bus-width = <8>;
        no-1-8-v;
        non-removable;
        status = "okay";
index 433bf09a1954c5ff05e1f3b3255c326fb69bf615..027df06c5dc7d60c9711ebef8b9333e2fe0c9a58 100644 (file)
@@ -91,6 +91,7 @@ &fec {
        pinctrl-0 = <&pinctrl_enet>;
        phy-handle = <&ethphy>;
        phy-mode = "rgmii";
+       phy-reset-duration = <10>; /* in msecs */
        phy-reset-gpios = <&gpio3 23 GPIO_ACTIVE_LOW>;
        phy-supply = <&vdd_eth_io_reg>;
        status = "disabled";
index f6fb6783c1933154049768297372832f68586a04..54cfe72295aa47a278ee8d5ffae5c688b6d8b4fa 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright (C) 2016 Freescale Semiconductor, Inc.
  * Copyright (C) 2017 NXP
index aa107ee41b8b8f3fbc13b92676224561fe0f92c0..ef653c3209bcc995aaa5d5cd79d0b3cf3fd0f8a0 100644 (file)
@@ -254,6 +254,7 @@ regulator-state-mem {
                        };
 
                        vccio_sd: LDO_REG5 {
+                               regulator-boot-on;
                                regulator-min-microvolt = <1800000>;
                                regulator-max-microvolt = <3300000>;
                                regulator-name = "vccio_sd";
@@ -430,7 +431,7 @@ &sdmmc {
        bus-width = <4>;
        cap-mmc-highspeed;
        cap-sd-highspeed;
-       card-detect-delay = <200>;
+       broken-cd;
        disable-wp;                     /* wp not hooked up */
        pinctrl-names = "default";
        pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>;
index 0bc2409f6903ffec1512942e9e657d9d983a2c35..192dbc089ade1730b9dce6bca8d356f3b0c83a00 100644 (file)
@@ -25,8 +25,6 @@ memory {
 
        gpio_keys: gpio-keys {
                compatible = "gpio-keys";
-               #address-cells = <1>;
-               #size-cells = <0>;
 
                pinctrl-names = "default";
                pinctrl-0 = <&pwr_key_l>;
index ca7d52daa8fb638641e3b90f633bcf1c1a1e5497..a024d1e7e74cd94eade3e5c00ed9d56353ef93aa 100644 (file)
@@ -70,7 +70,7 @@ cpu1: cpu@501 {
                        compatible = "arm,cortex-a12";
                        reg = <0x501>;
                        resets = <&cru SRST_CORE1>;
-                       operating-points = <&cpu_opp_table>;
+                       operating-points-v2 = <&cpu_opp_table>;
                        #cooling-cells = <2>; /* min followed by max */
                        clock-latency = <40000>;
                        clocks = <&cru ARMCLK>;
@@ -80,7 +80,7 @@ cpu2: cpu@502 {
                        compatible = "arm,cortex-a12";
                        reg = <0x502>;
                        resets = <&cru SRST_CORE2>;
-                       operating-points = <&cpu_opp_table>;
+                       operating-points-v2 = <&cpu_opp_table>;
                        #cooling-cells = <2>; /* min followed by max */
                        clock-latency = <40000>;
                        clocks = <&cru ARMCLK>;
@@ -90,7 +90,7 @@ cpu3: cpu@503 {
                        compatible = "arm,cortex-a12";
                        reg = <0x503>;
                        resets = <&cru SRST_CORE3>;
-                       operating-points = <&cpu_opp_table>;
+                       operating-points-v2 = <&cpu_opp_table>;
                        #cooling-cells = <2>; /* min followed by max */
                        clock-latency = <40000>;
                        clocks = <&cru ARMCLK>;
@@ -1119,8 +1119,6 @@ mipi_dsi: mipi@ff960000 {
                clock-names = "ref", "pclk";
                power-domains = <&power RK3288_PD_VIO>;
                rockchip,grf = <&grf>;
-               #address-cells = <1>;
-               #size-cells = <0>;
                status = "disabled";
 
                ports {
@@ -1282,27 +1280,27 @@ gpu: gpu@ffa30000 {
        gpu_opp_table: gpu-opp-table {
                compatible = "operating-points-v2";
 
-               opp@100000000 {
+               opp-100000000 {
                        opp-hz = /bits/ 64 <100000000>;
                        opp-microvolt = <950000>;
                };
-               opp@200000000 {
+               opp-200000000 {
                        opp-hz = /bits/ 64 <200000000>;
                        opp-microvolt = <950000>;
                };
-               opp@300000000 {
+               opp-300000000 {
                        opp-hz = /bits/ 64 <300000000>;
                        opp-microvolt = <1000000>;
                };
-               opp@400000000 {
+               opp-400000000 {
                        opp-hz = /bits/ 64 <400000000>;
                        opp-microvolt = <1100000>;
                };
-               opp@500000000 {
+               opp-500000000 {
                        opp-hz = /bits/ 64 <500000000>;
                        opp-microvolt = <1200000>;
                };
-               opp@600000000 {
+               opp-600000000 {
                        opp-hz = /bits/ 64 <600000000>;
                        opp-microvolt = <1250000>;
                };
index 1c01a6f843d8a43c07ab25dd18ffb57acd044c0b..28a2e45752fea34eb2efb576439409c0611d9d90 100644 (file)
 #define PIN_PC9__GPIO                  PINMUX_PIN(PIN_PC9, 0, 0)
 #define PIN_PC9__FIQ                   PINMUX_PIN(PIN_PC9, 1, 3)
 #define PIN_PC9__GTSUCOMP              PINMUX_PIN(PIN_PC9, 2, 1)
-#define PIN_PC9__ISC_D0                        PINMUX_PIN(PIN_PC9, 2, 1)
+#define PIN_PC9__ISC_D0                        PINMUX_PIN(PIN_PC9, 3, 1)
 #define PIN_PC9__TIOA4                 PINMUX_PIN(PIN_PC9, 4, 2)
 #define PIN_PC10                       74
 #define PIN_PC10__GPIO                 PINMUX_PIN(PIN_PC10, 0, 0)
index 8661dd9b064a5cdfd4a8801a8b98e9c9f45d7dc0..b37f8e675e4081b200bfd9b9a97d565efce1d1f7 100644 (file)
@@ -170,6 +170,9 @@ CONFIG_IMX_SDMA=y
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_IIO=y
 CONFIG_FSL_MX25_ADC=y
+CONFIG_PWM=y
+CONFIG_PWM_IMX1=y
+CONFIG_PWM_IMX27=y
 CONFIG_EXT4_FS=y
 # CONFIG_DNOTIFY is not set
 CONFIG_VFAT_FS=y
index 5586a5074a96b6a84165e32f59ea2fa0800b484a..50fb01d70b1030ca6d2f721b30eaa8078894b589 100644 (file)
@@ -398,7 +398,7 @@ CONFIG_MAG3110=y
 CONFIG_MPL3115=y
 CONFIG_PWM=y
 CONFIG_PWM_FSL_FTM=y
-CONFIG_PWM_IMX=y
+CONFIG_PWM_IMX27=y
 CONFIG_NVMEM_IMX_OCOTP=y
 CONFIG_NVMEM_VF610_OCOTP=y
 CONFIG_TEE=y
index 2de96a180166eb920833b1100159716735f5e206..31de4ab930050cb6e7584fd45747b5e329fc6bfd 100644 (file)
@@ -381,6 +381,17 @@ static inline int kvm_read_guest_lock(struct kvm *kvm,
        return ret;
 }
 
+static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
+                                      const void *data, unsigned long len)
+{
+       int srcu_idx = srcu_read_lock(&kvm->srcu);
+       int ret = kvm_write_guest(kvm, gpa, data, len);
+
+       srcu_read_unlock(&kvm->srcu, srcu_idx);
+
+       return ret;
+}
+
 static inline void *kvm_get_hyp_vector(void)
 {
        switch(read_cpuid_part()) {
index de2089501b8b5705a29bcb80b7007d630cfabc60..9e11dce55e06f4e7359b7b779cc7814ae752c813 100644 (file)
@@ -75,6 +75,8 @@ static inline bool kvm_stage2_has_pud(struct kvm *kvm)
 
 #define S2_PMD_MASK                            PMD_MASK
 #define S2_PMD_SIZE                            PMD_SIZE
+#define S2_PUD_MASK                            PUD_MASK
+#define S2_PUD_SIZE                            PUD_SIZE
 
 static inline bool kvm_stage2_has_pmd(struct kvm *kvm)
 {
index 06dea6bce293b934e1146d26aa316ea8e36e80b5..080ce70cab12a6944af4120ed5a3b9ca9889411d 100644 (file)
@@ -55,53 +55,22 @@ static inline void syscall_set_return_value(struct task_struct *task,
 
 static inline void syscall_get_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
-       if (n == 0)
-               return;
-
-       if (i + n > SYSCALL_MAX_ARGS) {
-               unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
-               unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
-               pr_warn("%s called with max args %d, handling only %d\n",
-                       __func__, i + n, SYSCALL_MAX_ARGS);
-               memset(args_bad, 0, n_bad * sizeof(args[0]));
-               n = SYSCALL_MAX_ARGS - i;
-       }
-
-       if (i == 0) {
-               args[0] = regs->ARM_ORIG_r0;
-               args++;
-               i++;
-               n--;
-       }
-
-       memcpy(args, &regs->ARM_r0 + i, n * sizeof(args[0]));
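+	/* r0 may already hold the return value, so argument 0 is ORIG_r0 */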
+       args[0] = regs->ARM_ORIG_r0;
+       args++;
+
+       memcpy(args, &regs->ARM_r0 + 1, 5 * sizeof(args[0]));
 }
 
 static inline void syscall_set_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         const unsigned long *args)
 {
-       if (n == 0)
-               return;
-
-       if (i + n > SYSCALL_MAX_ARGS) {
-               pr_warn("%s called with max args %d, handling only %d\n",
-                       __func__, i + n, SYSCALL_MAX_ARGS);
-               n = SYSCALL_MAX_ARGS - i;
-       }
-
-       if (i == 0) {
-               regs->ARM_ORIG_r0 = args[0];
-               args++;
-               i++;
-               n--;
-       }
-
-       memcpy(&regs->ARM_r0 + i, args, n * sizeof(args[0]));
+       regs->ARM_ORIG_r0 = args[0];
+       args++;
+
+       memcpy(&regs->ARM_r0 + 1, args, 5 * sizeof(args[0]));
 }
 
 static inline int syscall_get_arch(void)
index 23b4464c0995ab3ed1673f7aa36e16a3554e296a..ce8573157774dc078e49a5a2d5ccecc3ad54986d 100644 (file)
@@ -3,3 +3,4 @@
 generated-y += unistd-common.h
 generated-y += unistd-oabi.h
 generated-y += unistd-eabi.h
+generic-y += kvm_para.h
diff --git a/arch/arm/include/uapi/asm/kvm_para.h b/arch/arm/include/uapi/asm/kvm_para.h
deleted file mode 100644 (file)
index baacc49..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#include <asm-generic/kvm_para.h>
index 51e808adb00cc23576b1f9148f0388542627d397..2a757dcaa1a5e9d63b5ae47833ef31d12ab94aa2 100644 (file)
@@ -591,13 +591,13 @@ static int __init at91_pm_backup_init(void)
 
        np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-securam");
        if (!np)
-               goto securam_fail;
+               goto securam_fail_no_ref_dev;
 
        pdev = of_find_device_by_node(np);
        of_node_put(np);
        if (!pdev) {
                pr_warn("%s: failed to find securam device!\n", __func__);
-               goto securam_fail;
+               goto securam_fail_no_ref_dev;
        }
 
        sram_pool = gen_pool_get(&pdev->dev, NULL);
@@ -620,6 +620,8 @@ static int __init at91_pm_backup_init(void)
        return 0;
 
 securam_fail:
+       put_device(&pdev->dev);
+securam_fail_no_ref_dev:
        iounmap(pm_data.sfrbu);
        pm_data.sfrbu = NULL;
        return ret;
index bfeb25aaf9a2a7a48857a3896fb682d7d94568a8..326e870d712394fad445033defd8e3ff5975ebdd 100644 (file)
 #include "cpuidle.h"
 #include "hardware.h"
 
-static atomic_t master = ATOMIC_INIT(0);
-static DEFINE_SPINLOCK(master_lock);
+static int num_idle_cpus = 0;
+static DEFINE_SPINLOCK(cpuidle_lock);
 
 static int imx6q_enter_wait(struct cpuidle_device *dev,
                            struct cpuidle_driver *drv, int index)
 {
-       if (atomic_inc_return(&master) == num_online_cpus()) {
-               /*
-                * With this lock, we prevent other cpu to exit and enter
-                * this function again and become the master.
-                */
-               if (!spin_trylock(&master_lock))
-                       goto idle;
+       spin_lock(&cpuidle_lock);
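+	/* the last CPU to enter idle gates the SoC clocks off */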
+       if (++num_idle_cpus == num_online_cpus())
                imx6_set_lpm(WAIT_UNCLOCKED);
-               cpu_do_idle();
-               imx6_set_lpm(WAIT_CLOCKED);
-               spin_unlock(&master_lock);
-               goto done;
-       }
+       spin_unlock(&cpuidle_lock);
 
-idle:
        cpu_do_idle();
-done:
-       atomic_dec(&master);
+
+       spin_lock(&cpuidle_lock);
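+	/* the first CPU to wake up restores the clocked WAIT state */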
+       if (num_idle_cpus-- == num_online_cpus())
+               imx6_set_lpm(WAIT_CLOCKED);
+       spin_unlock(&cpuidle_lock);
 
        return index;
 }
index c7169c2f94c4fd8cc018caa790c7b170e778eaf3..08c7892866c2df48732d15b9aa64329d0b009b75 100644 (file)
@@ -59,6 +59,7 @@ static void __init imx51_m4if_setup(void)
                return;
 
        m4if_base = of_iomap(np, 0);
+       of_node_put(np);
        if (!m4if_base) {
                pr_err("Unable to map M4IF registers\n");
                return;
index 53c316f7301e69fcbebbfe5d73bb48664180f5b6..fe4932fda01d7d0bc819c0ca4e6dcedb6b061081 100644 (file)
@@ -300,7 +300,7 @@ static struct resource iop13xx_adma_2_resources[] = {
        }
 };
 
-static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(64);
+static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(32);
 static struct iop_adma_platform_data iop13xx_adma_0_data = {
        .hw_id = 0,
        .pool_size = PAGE_SIZE,
@@ -324,7 +324,7 @@ static struct platform_device iop13xx_adma_0_channel = {
        .resource = iop13xx_adma_0_resources,
        .dev = {
                .dma_mask = &iop13xx_adma_dmamask,
-               .coherent_dma_mask = DMA_BIT_MASK(64),
+               .coherent_dma_mask = DMA_BIT_MASK(32),
                .platform_data = (void *) &iop13xx_adma_0_data,
        },
 };
@@ -336,7 +336,7 @@ static struct platform_device iop13xx_adma_1_channel = {
        .resource = iop13xx_adma_1_resources,
        .dev = {
                .dma_mask = &iop13xx_adma_dmamask,
-               .coherent_dma_mask = DMA_BIT_MASK(64),
+               .coherent_dma_mask = DMA_BIT_MASK(32),
                .platform_data = (void *) &iop13xx_adma_1_data,
        },
 };
@@ -348,7 +348,7 @@ static struct platform_device iop13xx_adma_2_channel = {
        .resource = iop13xx_adma_2_resources,
        .dev = {
                .dma_mask = &iop13xx_adma_dmamask,
-               .coherent_dma_mask = DMA_BIT_MASK(64),
+               .coherent_dma_mask = DMA_BIT_MASK(32),
                .platform_data = (void *) &iop13xx_adma_2_data,
        },
 };
index db511ec2b1df6824cb6d3d24659cfebe2428d5ec..116feb6b261eb7b0e08ee7ce248e44682e537898 100644 (file)
@@ -152,7 +152,7 @@ static struct resource iop13xx_tpmi_3_resources[] = {
        }
 };
 
-u64 iop13xx_tpmi_mask = DMA_BIT_MASK(64);
+u64 iop13xx_tpmi_mask = DMA_BIT_MASK(32);
 static struct platform_device iop13xx_tpmi_0_device = {
        .name = "iop-tpmi",
        .id = 0,
@@ -160,7 +160,7 @@ static struct platform_device iop13xx_tpmi_0_device = {
        .resource = iop13xx_tpmi_0_resources,
        .dev = {
                .dma_mask          = &iop13xx_tpmi_mask,
-               .coherent_dma_mask = DMA_BIT_MASK(64),
+               .coherent_dma_mask = DMA_BIT_MASK(32),
        },
 };
 
@@ -171,7 +171,7 @@ static struct platform_device iop13xx_tpmi_1_device = {
        .resource = iop13xx_tpmi_1_resources,
        .dev = {
                .dma_mask          = &iop13xx_tpmi_mask,
-               .coherent_dma_mask = DMA_BIT_MASK(64),
+               .coherent_dma_mask = DMA_BIT_MASK(32),
        },
 };
 
@@ -182,7 +182,7 @@ static struct platform_device iop13xx_tpmi_2_device = {
        .resource = iop13xx_tpmi_2_resources,
        .dev = {
                .dma_mask          = &iop13xx_tpmi_mask,
-               .coherent_dma_mask = DMA_BIT_MASK(64),
+               .coherent_dma_mask = DMA_BIT_MASK(32),
        },
 };
 
@@ -193,7 +193,7 @@ static struct platform_device iop13xx_tpmi_3_device = {
        .resource = iop13xx_tpmi_3_resources,
        .dev = {
                .dma_mask          = &iop13xx_tpmi_mask,
-               .coherent_dma_mask = DMA_BIT_MASK(64),
+               .coherent_dma_mask = DMA_BIT_MASK(32),
        },
 };
 
index 591543c81399b4f976b52599cf84b7cb22e512ea..3ea880f5fcb7338b8e419db9fd49c06cee263ca0 100644 (file)
@@ -65,6 +65,7 @@ static void m10v_smp_init(unsigned int max_cpus)
                writel(KERNEL_UNBOOT_FLAG, m10v_smp_base + cpu * 4);
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
 static void m10v_cpu_die(unsigned int l_cpu)
 {
        gic_cpu_if_down(0);
@@ -83,12 +84,15 @@ static int m10v_cpu_kill(unsigned int l_cpu)
 
        return 1;
 }
+#endif
 
 static struct smp_operations m10v_smp_ops __initdata = {
        .smp_prepare_cpus       = m10v_smp_init,
        .smp_boot_secondary     = m10v_boot_secondary,
+#ifdef CONFIG_HOTPLUG_CPU
        .cpu_die                = m10v_cpu_die,
        .cpu_kill               = m10v_cpu_kill,
+#endif
 };
 CPU_METHOD_OF_DECLARE(m10v_smp, "socionext,milbeaut-m10v-smp", &m10v_smp_ops);
 
index be30c3c061b46ee0c1adf3ce55a872eb7bc9c9c0..1b15d593837ed78ea22298ccc4ae60cb3de166f1 100644 (file)
@@ -182,6 +182,7 @@ static struct resource latch1_resources[] = {
 
 static struct bgpio_pdata latch1_pdata = {
        .label  = LATCH1_LABEL,
+       .base   = -1,
        .ngpio  = LATCH1_NGPIO,
 };
 
@@ -219,6 +220,7 @@ static struct resource latch2_resources[] = {
 
 static struct bgpio_pdata latch2_pdata = {
        .label  = LATCH2_LABEL,
+       .base   = -1,
        .ngpio  = LATCH2_NGPIO,
 };
 
index 1444b4b4bd9f85e54368c0e18ac31f3f2fc033eb..439e143cad7b5d4d8ef48122816f9acf436570c3 100644 (file)
@@ -250,8 +250,10 @@ static int __init omapdss_init_of(void)
        if (!node)
                return 0;
 
-       if (!of_device_is_available(node))
+       if (!of_device_is_available(node)) {
+               of_node_put(node);
                return 0;
+       }
 
        pdev = of_find_device_by_node(node);
 
index a4d1f8de3b5b23453ee4738723164a5ba8405424..d9612221e4848971f4ea27cf4f5d4c319073e439 100644 (file)
@@ -143,7 +143,7 @@ struct platform_device iop3xx_dma_0_channel = {
        .resource = iop3xx_dma_0_resources,
        .dev = {
                .dma_mask = &iop3xx_adma_dmamask,
-               .coherent_dma_mask = DMA_BIT_MASK(64),
+               .coherent_dma_mask = DMA_BIT_MASK(32),
                .platform_data = (void *) &iop3xx_dma_0_data,
        },
 };
@@ -155,7 +155,7 @@ struct platform_device iop3xx_dma_1_channel = {
        .resource = iop3xx_dma_1_resources,
        .dev = {
                .dma_mask = &iop3xx_adma_dmamask,
-               .coherent_dma_mask = DMA_BIT_MASK(64),
+               .coherent_dma_mask = DMA_BIT_MASK(32),
                .platform_data = (void *) &iop3xx_dma_1_data,
        },
 };
@@ -167,7 +167,7 @@ struct platform_device iop3xx_aau_channel = {
        .resource = iop3xx_aau_resources,
        .dev = {
                .dma_mask = &iop3xx_adma_dmamask,
-               .coherent_dma_mask = DMA_BIT_MASK(64),
+               .coherent_dma_mask = DMA_BIT_MASK(32),
                .platform_data = (void *) &iop3xx_aau_data,
        },
 };
index a6c81ce00f520625880c29c083b3f70384c3db1f..8647cb80a93bd222234f4951f2249ac0399ca025 100644 (file)
@@ -622,7 +622,7 @@ static struct platform_device orion_xor0_shared = {
        .resource       = orion_xor0_shared_resources,
        .dev            = {
                .dma_mask               = &orion_xor_dmamask,
-               .coherent_dma_mask      = DMA_BIT_MASK(64),
+               .coherent_dma_mask      = DMA_BIT_MASK(32),
                .platform_data          = &orion_xor0_pdata,
        },
 };
@@ -683,7 +683,7 @@ static struct platform_device orion_xor1_shared = {
        .resource       = orion_xor1_shared_resources,
        .dev            = {
                .dma_mask               = &orion_xor_dmamask,
-               .coherent_dma_mask      = DMA_BIT_MASK(64),
+               .coherent_dma_mask      = DMA_BIT_MASK(32),
                .platform_data          = &orion_xor1_pdata,
        },
 };
index 70498a033cf57408ccdefe374c5fa8e1d22e785d..b5ca9c50876d9a23947dde5d7fe553104c9c0805 100644 (file)
@@ -27,6 +27,7 @@ config ARCH_BCM2835
        bool "Broadcom BCM2835 family"
        select TIMER_OF
        select GPIOLIB
+       select MFD_CORE
        select PINCTRL
        select PINCTRL_BCM2835
        select ARM_AMBA
index 7c649f6b14cb6eb73ea8fb23ded74ed5152d3a70..cd7c76e58b09a60f75ccd510083bab730a2378b0 100644 (file)
@@ -162,6 +162,7 @@ gmac0: ethernet@ff800000 {
                        rx-fifo-depth = <16384>;
                        snps,multicast-filter-bins = <256>;
                        iommus = <&smmu 1>;
+                       altr,sysmgr-syscon = <&sysmgr 0x44 0>;
                        status = "disabled";
                };
 
@@ -179,6 +180,7 @@ gmac1: ethernet@ff802000 {
                        rx-fifo-depth = <16384>;
                        snps,multicast-filter-bins = <256>;
                        iommus = <&smmu 2>;
+                       altr,sysmgr-syscon = <&sysmgr 0x48 0>;
                        status = "disabled";
                };
 
@@ -196,6 +198,7 @@ gmac2: ethernet@ff804000 {
                        rx-fifo-depth = <16384>;
                        snps,multicast-filter-bins = <256>;
                        iommus = <&smmu 3>;
+                       altr,sysmgr-syscon = <&sysmgr 0x4c 0>;
                        status = "disabled";
                };
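The new altr,sysmgr-syscon property points each GMAC at the Stratix 10 System Manager; the two cells after the phandle look like a register offset and a shift. A hedged sketch of how glue code typically consumes such a phandle-plus-args property (the APIs are real kernel functions; the mask and variable names are assumptions):

    struct regmap *sysmgr = syscon_regmap_lookup_by_phandle(np, "altr,sysmgr-syscon");
    u32 off, shift;

    of_property_read_u32_index(np, "altr,sysmgr-syscon", 1, &off);
    of_property_read_u32_index(np, "altr,sysmgr-syscon", 2, &shift);
    regmap_update_bits(sysmgr, off, 0x3 << shift, phymode << shift); /* mask illustrative */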
 
index bb2045be8814036ddced1d4a7ec5b42951343832..97aeb946ed5e7473639ec94a498512d48a12ca8b 100644 (file)
@@ -321,7 +321,6 @@ sdmmc4: sdhci@3460000 {
                nvidia,default-trim = <0x9>;
                nvidia,dqs-trim = <63>;
                mmc-hs400-1_8v;
-               supports-cqe;
                status = "disabled";
        };
 
index 61a0afb74e6310b2b4c16bcf9939f6eab7db6258..1ea684af99c4a19b674f2ab90e38680584b09cf4 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * Device Tree Source for the RZ/G2E (R8A774C0) SoC
  *
- * Copyright (C) 2018 Renesas Electronics Corp.
+ * Copyright (C) 2018-2019 Renesas Electronics Corp.
  */
 
 #include <dt-bindings/clock/r8a774c0-cpg-mssr.h>
@@ -1150,9 +1150,8 @@ scif5: serial@e6f30000 {
                                 <&cpg CPG_CORE R8A774C0_CLK_S3D1C>,
                                 <&scif_clk>;
                        clock-names = "fck", "brg_int", "scif_clk";
-                       dmas = <&dmac1 0x5b>, <&dmac1 0x5a>,
-                              <&dmac2 0x5b>, <&dmac2 0x5a>;
-                       dma-names = "tx", "rx", "tx", "rx";
+                       dmas = <&dmac0 0x5b>, <&dmac0 0x5a>;
+                       dma-names = "tx", "rx";
                        power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
                        resets = <&cpg 202>;
                        status = "disabled";
index a69faa60ea4da4bb06a257af39881138a026c6d1..d2ad665fe2d925db040e50d2d9341b5535ddd167 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * Device Tree Source for the R-Car E3 (R8A77990) SoC
  *
- * Copyright (C) 2018 Renesas Electronics Corp.
+ * Copyright (C) 2018-2019 Renesas Electronics Corp.
  */
 
 #include <dt-bindings/clock/r8a77990-cpg-mssr.h>
@@ -1067,9 +1067,8 @@ scif5: serial@e6f30000 {
                                 <&cpg CPG_CORE R8A77990_CLK_S3D1C>,
                                 <&scif_clk>;
                        clock-names = "fck", "brg_int", "scif_clk";
-                       dmas = <&dmac1 0x5b>, <&dmac1 0x5a>,
-                              <&dmac2 0x5b>, <&dmac2 0x5a>;
-                       dma-names = "tx", "rx", "tx", "rx";
+                       dmas = <&dmac0 0x5b>, <&dmac0 0x5a>;
+                       dma-names = "tx", "rx";
                        power-domains = <&sysc R8A77990_PD_ALWAYS_ON>;
                        resets = <&cpg 202>;
                        status = "disabled";
index 33c44e857247e4a64847f22945c4c06bb7735ecf..0e34354b20927698482fddaf6814483394a18b93 100644 (file)
@@ -108,8 +108,8 @@ &gmac2io {
        snps,reset-gpio = <&gpio1 RK_PC2 GPIO_ACTIVE_LOW>;
        snps,reset-active-low;
        snps,reset-delays-us = <0 10000 50000>;
-       tx_delay = <0x25>;
-       rx_delay = <0x11>;
+       tx_delay = <0x24>;
+       rx_delay = <0x18>;
        status = "okay";
 };
 
index 2157a528276bffae23afbaf3152db66292b7817a..79b4d1d4b5d6b67672dcbab1de19d274cecd5c5b 100644 (file)
@@ -46,8 +46,7 @@ vcc_host_5v: vcc-host-5v-regulator {
 
        vcc_host1_5v: vcc_otg_5v: vcc-host1-5v-regulator {
                compatible = "regulator-fixed";
-               enable-active-high;
-               gpio = <&gpio0 RK_PA2 GPIO_ACTIVE_HIGH>;
+               gpio = <&gpio0 RK_PA2 GPIO_ACTIVE_LOW>;
                pinctrl-names = "default";
                pinctrl-0 = <&usb20_host_drv>;
                regulator-name = "vcc_host1_5v";
index 84f14b132e8f5fb80bf3f178a72f5e138d144bd3..dabef1a21649ba44ee4b880d83d9b24591ac1d9d 100644 (file)
@@ -1445,11 +1445,11 @@ sdmmc0m1_gpio: sdmmc0m1-gpio {
 
                sdmmc0 {
                        sdmmc0_clk: sdmmc0-clk {
-                               rockchip,pins = <1 RK_PA6 1 &pcfg_pull_none_4ma>;
+                               rockchip,pins = <1 RK_PA6 1 &pcfg_pull_none_8ma>;
                        };
 
                        sdmmc0_cmd: sdmmc0-cmd {
-                               rockchip,pins = <1 RK_PA4 1 &pcfg_pull_up_4ma>;
+                               rockchip,pins = <1 RK_PA4 1 &pcfg_pull_up_8ma>;
                        };
 
                        sdmmc0_dectn: sdmmc0-dectn {
@@ -1461,14 +1461,14 @@ sdmmc0_wrprt: sdmmc0-wrprt {
                        };
 
                        sdmmc0_bus1: sdmmc0-bus1 {
-                               rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_4ma>;
+                               rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_8ma>;
                        };
 
                        sdmmc0_bus4: sdmmc0-bus4 {
-                               rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_4ma>,
-                                               <1 RK_PA1 1 &pcfg_pull_up_4ma>,
-                                               <1 RK_PA2 1 &pcfg_pull_up_4ma>,
-                                               <1 RK_PA3 1 &pcfg_pull_up_4ma>;
+                               rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_8ma>,
+                                               <1 RK_PA1 1 &pcfg_pull_up_8ma>,
+                                               <1 RK_PA2 1 &pcfg_pull_up_8ma>,
+                                               <1 RK_PA3 1 &pcfg_pull_up_8ma>;
                        };
 
                        sdmmc0_gpio: sdmmc0-gpio {
@@ -1642,50 +1642,50 @@ gmac-1 {
                        rgmiim1_pins: rgmiim1-pins {
                                rockchip,pins =
                                        /* mac_txclk */
-                                       <1 RK_PB4 2 &pcfg_pull_none_12ma>,
+                                       <1 RK_PB4 2 &pcfg_pull_none_8ma>,
                                        /* mac_rxclk */
-                                       <1 RK_PB5 2 &pcfg_pull_none_2ma>,
+                                       <1 RK_PB5 2 &pcfg_pull_none_4ma>,
                                        /* mac_mdio */
-                                       <1 RK_PC3 2 &pcfg_pull_none_2ma>,
+                                       <1 RK_PC3 2 &pcfg_pull_none_4ma>,
                                        /* mac_txen */
-                                       <1 RK_PD1 2 &pcfg_pull_none_12ma>,
+                                       <1 RK_PD1 2 &pcfg_pull_none_8ma>,
                                        /* mac_clk */
-                                       <1 RK_PC5 2 &pcfg_pull_none_2ma>,
+                                       <1 RK_PC5 2 &pcfg_pull_none_4ma>,
                                        /* mac_rxdv */
-                                       <1 RK_PC6 2 &pcfg_pull_none_2ma>,
+                                       <1 RK_PC6 2 &pcfg_pull_none_4ma>,
                                        /* mac_mdc */
-                                       <1 RK_PC7 2 &pcfg_pull_none_2ma>,
+                                       <1 RK_PC7 2 &pcfg_pull_none_4ma>,
                                        /* mac_rxd1 */
-                                       <1 RK_PB2 2 &pcfg_pull_none_2ma>,
+                                       <1 RK_PB2 2 &pcfg_pull_none_4ma>,
                                        /* mac_rxd0 */
-                                       <1 RK_PB3 2 &pcfg_pull_none_2ma>,
+                                       <1 RK_PB3 2 &pcfg_pull_none_4ma>,
                                        /* mac_txd1 */
-                                       <1 RK_PB0 2 &pcfg_pull_none_12ma>,
+                                       <1 RK_PB0 2 &pcfg_pull_none_8ma>,
                                        /* mac_txd0 */
-                                       <1 RK_PB1 2 &pcfg_pull_none_12ma>,
+                                       <1 RK_PB1 2 &pcfg_pull_none_8ma>,
                                        /* mac_rxd3 */
-                                       <1 RK_PB6 2 &pcfg_pull_none_2ma>,
+                                       <1 RK_PB6 2 &pcfg_pull_none_4ma>,
                                        /* mac_rxd2 */
-                                       <1 RK_PB7 2 &pcfg_pull_none_2ma>,
+                                       <1 RK_PB7 2 &pcfg_pull_none_4ma>,
                                        /* mac_txd3 */
-                                       <1 RK_PC0 2 &pcfg_pull_none_12ma>,
+                                       <1 RK_PC0 2 &pcfg_pull_none_8ma>,
                                        /* mac_txd2 */
-                                       <1 RK_PC1 2 &pcfg_pull_none_12ma>,
+                                       <1 RK_PC1 2 &pcfg_pull_none_8ma>,
 
                                        /* mac_txclk */
-                                       <0 RK_PB0 1 &pcfg_pull_none>,
+                                       <0 RK_PB0 1 &pcfg_pull_none_8ma>,
                                        /* mac_txen */
-                                       <0 RK_PB4 1 &pcfg_pull_none>,
+                                       <0 RK_PB4 1 &pcfg_pull_none_8ma>,
                                        /* mac_clk */
-                                       <0 RK_PD0 1 &pcfg_pull_none>,
+                                       <0 RK_PD0 1 &pcfg_pull_none_4ma>,
                                        /* mac_txd1 */
-                                       <0 RK_PC0 1 &pcfg_pull_none>,
+                                       <0 RK_PC0 1 &pcfg_pull_none_8ma>,
                                        /* mac_txd0 */
-                                       <0 RK_PC1 1 &pcfg_pull_none>,
+                                       <0 RK_PC1 1 &pcfg_pull_none_8ma>,
                                        /* mac_txd3 */
-                                       <0 RK_PC7 1 &pcfg_pull_none>,
+                                       <0 RK_PC7 1 &pcfg_pull_none_8ma>,
                                        /* mac_txd2 */
-                                       <0 RK_PC6 1 &pcfg_pull_none>;
+                                       <0 RK_PC6 1 &pcfg_pull_none_8ma>;
                        };
 
                        rmiim1_pins: rmiim1-pins {
index 4a543f2117d4212b9e26578a64db9ad982ff5c59..844eac939a97c58f9aea4a2e681b39dd6648f4f1 100644 (file)
@@ -158,6 +158,7 @@ &gmac {
 };
 
 &hdmi {
+       ddc-i2c-bus = <&i2c3>;
        pinctrl-names = "default";
        pinctrl-0 = <&hdmi_cec>;
        status = "okay";
index cccb83ad7fa8ea2e1f4251dd724edc62c754771b..e1d95f08f8e127d2e7bf334f94b146669aab79cb 100644 (file)
@@ -30,8 +30,8 @@ do {                                                                  \
 "      prfm    pstl1strm, %2\n"                                        \
 "1:    ldxr    %w1, %2\n"                                              \
        insn "\n"                                                       \
-"2:    stlxr   %w3, %w0, %2\n"                                         \
-"      cbnz    %w3, 1b\n"                                              \
+"2:    stlxr   %w0, %w3, %2\n"                                         \
+"      cbnz    %w0, 1b\n"                                              \
 "      dmb     ish\n"                                                  \
 "3:\n"                                                                 \
 "      .pushsection .fixup,\"ax\"\n"                                   \
@@ -50,30 +50,30 @@ do {                                                                        \
 static inline int
 arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr)
 {
-       int oldval = 0, ret, tmp;
+       int oldval, ret, tmp;
        u32 __user *uaddr = __uaccess_mask_ptr(_uaddr);
 
        pagefault_disable();
 
        switch (op) {
        case FUTEX_OP_SET:
-               __futex_atomic_op("mov  %w0, %w4",
+               __futex_atomic_op("mov  %w3, %w4",
                                  ret, oldval, uaddr, tmp, oparg);
                break;
        case FUTEX_OP_ADD:
-               __futex_atomic_op("add  %w0, %w1, %w4",
+               __futex_atomic_op("add  %w3, %w1, %w4",
                                  ret, oldval, uaddr, tmp, oparg);
                break;
        case FUTEX_OP_OR:
-               __futex_atomic_op("orr  %w0, %w1, %w4",
+               __futex_atomic_op("orr  %w3, %w1, %w4",
                                  ret, oldval, uaddr, tmp, oparg);
                break;
        case FUTEX_OP_ANDN:
-               __futex_atomic_op("and  %w0, %w1, %w4",
+               __futex_atomic_op("and  %w3, %w1, %w4",
                                  ret, oldval, uaddr, tmp, ~oparg);
                break;
        case FUTEX_OP_XOR:
-               __futex_atomic_op("eor  %w0, %w1, %w4",
+               __futex_atomic_op("eor  %w3, %w1, %w4",
                                  ret, oldval, uaddr, tmp, oparg);
                break;
        default:
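The operand swap above is the whole fix: in __futex_atomic_op() %w0 is the ret output and %w3 is the tmp scratch register, so the old template computed the new value into ret and let the store-exclusive status clobber tmp, leaving garbage in the return register. After the change the value lives in tmp and ret carries the STLXR status (0 on success, or -EFAULT via the fault fixup). In C-like pseudocode (helper names illustrative, not real APIs):

    do {
            oldval = load_exclusive(uaddr);         /* 1: ldxr  %w1, %2      */
            tmp    = OP(oldval, oparg);             /*    insn, now -> %w3   */
            ret    = store_exclusive(uaddr, tmp);   /* 2: stlxr %w0, %w3, %2 */
    } while (ret);                                  /*    cbnz  %w0, 1b      */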
index b0742a16c6c9e43ca73888c2c9778042174328a2..ebeefcf835e8d7f65040fe5eed38ba4693a1f67b 100644 (file)
@@ -445,6 +445,17 @@ static inline int kvm_read_guest_lock(struct kvm *kvm,
        return ret;
 }
 
+static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
+                                      const void *data, unsigned long len)
+{
+       int srcu_idx = srcu_read_lock(&kvm->srcu);
+       int ret = kvm_write_guest(kvm, gpa, data, len);
+
+       srcu_read_unlock(&kvm->srcu, srcu_idx);
+
+       return ret;
+}
+
 #ifdef CONFIG_KVM_INDIRECT_VECTORS
 /*
  * EL2 vectors can be mapped and rerouted in a number of ways,
index 905e1bb0e7bd023b7174da7a6a81459df87b40dc..cd9f4e9d04d3be6564843e821b2a612642717210 100644 (file)
@@ -73,4 +73,9 @@ static inline bool is_forbidden_offset_for_adrp(void *place)
 struct plt_entry get_plt_entry(u64 dst, void *pc);
 bool plt_entries_equal(const struct plt_entry *a, const struct plt_entry *b);
 
+static inline bool plt_entry_is_initialized(const struct plt_entry *e)
+{
+       return e->adrp || e->add || e->br;
+}
+
 #endif /* __ASM_MODULE_H */
index ad8be16a39c9d18bdbd406f522c02432529c4cf6..a179df3674a1aa207dfdead37e47219353b67b91 100644 (file)
@@ -65,52 +65,22 @@ static inline void syscall_set_return_value(struct task_struct *task,
 
 static inline void syscall_get_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
-       if (n == 0)
-               return;
-
-       if (i + n > SYSCALL_MAX_ARGS) {
-               unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
-               unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
-               pr_warning("%s called with max args %d, handling only %d\n",
-                          __func__, i + n, SYSCALL_MAX_ARGS);
-               memset(args_bad, 0, n_bad * sizeof(args[0]));
-       }
-
-       if (i == 0) {
-               args[0] = regs->orig_x0;
-               args++;
-               i++;
-               n--;
-       }
-
-       memcpy(args, &regs->regs[i], n * sizeof(args[0]));
+       args[0] = regs->orig_x0;
+       args++;
+
+       memcpy(args, &regs->regs[1], 5 * sizeof(args[0]));
 }
 
 static inline void syscall_set_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         const unsigned long *args)
 {
-       if (n == 0)
-               return;
-
-       if (i + n > SYSCALL_MAX_ARGS) {
-               pr_warning("%s called with max args %d, handling only %d\n",
-                          __func__, i + n, SYSCALL_MAX_ARGS);
-               n = SYSCALL_MAX_ARGS - i;
-       }
-
-       if (i == 0) {
-               regs->orig_x0 = args[0];
-               args++;
-               i++;
-               n--;
-       }
-
-       memcpy(&regs->regs[i], args, n * sizeof(args[0]));
+       regs->orig_x0 = args[0];
+       args++;
+
+       memcpy(&regs->regs[1], args, 5 * sizeof(args[0]));
 }
 
 /*
index 8e4431a8821f5920e49910dd287db5882e73f330..07b298120182042d2a1dea18160ef63e5a678b9d 100644 (file)
@@ -107,8 +107,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
                trampoline = get_plt_entry(addr, mod->arch.ftrace_trampoline);
                if (!plt_entries_equal(mod->arch.ftrace_trampoline,
                                       &trampoline)) {
-                       if (!plt_entries_equal(mod->arch.ftrace_trampoline,
-                                              &(struct plt_entry){})) {
+                       if (plt_entry_is_initialized(mod->arch.ftrace_trampoline)) {
                                pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
                                return -EINVAL;
                        }
index 5ba4465e44f09028c89fb190b7d65927635a9d10..ea94cf8f9dc6d15f58a7c8e298eba6d8bfdecede 100644 (file)
@@ -94,6 +94,9 @@ static bool on_sdei_normal_stack(unsigned long sp, struct stack_info *info)
        unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr);
        unsigned long high = low + SDEI_STACK_SIZE;
 
+       if (!low)
+               return false;
+
        if (sp < low || sp >= high)
                return false;
 
@@ -111,6 +114,9 @@ static bool on_sdei_critical_stack(unsigned long sp, struct stack_info *info)
        unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr);
        unsigned long high = low + SDEI_STACK_SIZE;
 
+       if (!low)
+               return false;
+
        if (sp < low || sp >= high)
                return false;
 
index f8482fe5a190f47937ee188aa7ff3cbf67bcf2a2..413d566405d175ee882fc4f29a017a6fd39ce0b6 100644 (file)
@@ -217,7 +217,7 @@ static void __init request_standard_resources(void)
 
        num_standard_resources = memblock.memory.cnt;
        res_size = num_standard_resources * sizeof(*standard_resources);
-       standard_resources = memblock_alloc_low(res_size, SMP_CACHE_BYTES);
+       standard_resources = memblock_alloc(res_size, SMP_CACHE_BYTES);
        if (!standard_resources)
                panic("%s: Failed to allocate %zu bytes\n", __func__, res_size);
 
index 8ad119c3f665d4e8001038ccf3bd6dcb62e2e224..29755989f616c187481803b27ca9dbfcf0a7847b 100644 (file)
@@ -102,10 +102,16 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
 void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 {
        struct stackframe frame;
-       int skip;
+       int skip = 0;
 
        pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
 
+       if (regs) {
+               if (user_mode(regs))
+                       return;
+               skip = 1;
+       }
+
        if (!tsk)
                tsk = current;
 
@@ -126,7 +132,6 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
        frame.graph = 0;
 #endif
 
-       skip = !!regs;
        printk("Call trace:\n");
        do {
                /* skip until specified stack frame */
@@ -176,15 +181,13 @@ static int __die(const char *str, int err, struct pt_regs *regs)
                return ret;
 
        print_modules();
-       __show_regs(regs);
        pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
                 TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk),
                 end_of_stack(tsk));
+       show_regs(regs);
 
-       if (!user_mode(regs)) {
-               dump_backtrace(regs, tsk);
+       if (!user_mode(regs))
                dump_instr(KERN_EMERG, regs);
-       }
 
        return ret;
 }
index f16a5f8ff2b41fa4284da58d1d2caf7af7b46e7b..e2a0500cd7a27c9ecc5326dd2380f23917309f91 100644 (file)
@@ -123,6 +123,9 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
        int ret = -EINVAL;
        bool loaded;
 
+       /* Reset PMU outside of the non-preemptible section */
+       kvm_pmu_vcpu_reset(vcpu);
+
        preempt_disable();
        loaded = (vcpu->cpu != -1);
        if (loaded)
@@ -170,9 +173,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
                vcpu->arch.reset_state.reset = false;
        }
 
-       /* Reset PMU */
-       kvm_pmu_vcpu_reset(vcpu);
-
        /* Default workaround setup is enabled (if supported) */
        if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL)
                vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
index 63b4a170518220397d22ec86cf9911f470139b97..249c9f6f26dce7c2dd2a43f9a9f20bd0d29478f3 100644 (file)
@@ -19,6 +19,7 @@ generic-y += irq_work.h
 generic-y += kdebug.h
 generic-y += kmap_types.h
 generic-y += kprobes.h
+generic-y += kvm_para.h
 generic-y += local.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
index ae2be315ee9c98d8440f4409be8b67cc261f8858..15ba8599858e6be5a860e23f338c8bd292da7207 100644 (file)
@@ -46,78 +46,27 @@ static inline void syscall_set_return_value(struct task_struct *task,
 }
 
 static inline void syscall_get_arguments(struct task_struct *task,
-                                        struct pt_regs *regs, unsigned int i,
-                                        unsigned int n, unsigned long *args)
+                                        struct pt_regs *regs,
+                                        unsigned long *args)
 {
-       switch (i) {
-       case 0:
-               if (!n--)
-                       break;
-               *args++ = regs->a4;
-       case 1:
-               if (!n--)
-                       break;
-               *args++ = regs->b4;
-       case 2:
-               if (!n--)
-                       break;
-               *args++ = regs->a6;
-       case 3:
-               if (!n--)
-                       break;
-               *args++ = regs->b6;
-       case 4:
-               if (!n--)
-                       break;
-               *args++ = regs->a8;
-       case 5:
-               if (!n--)
-                       break;
-               *args++ = regs->b8;
-       case 6:
-               if (!n--)
-                       break;
-       default:
-               BUG();
-       }
+       *args++ = regs->a4;
+       *args++ = regs->b4;
+       *args++ = regs->a6;
+       *args++ = regs->b6;
+       *args++ = regs->a8;
+       *args   = regs->b8;
 }
 
 static inline void syscall_set_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         const unsigned long *args)
 {
-       switch (i) {
-       case 0:
-               if (!n--)
-                       break;
-               regs->a4 = *args++;
-       case 1:
-               if (!n--)
-                       break;
-               regs->b4 = *args++;
-       case 2:
-               if (!n--)
-                       break;
-               regs->a6 = *args++;
-       case 3:
-               if (!n--)
-                       break;
-               regs->b6 = *args++;
-       case 4:
-               if (!n--)
-                       break;
-               regs->a8 = *args++;
-       case 5:
-               if (!n--)
-                       break;
-               regs->a9 = *args++;
-       case 6:
-               if (!n)
-                       break;
-       default:
-               BUG();
-       }
+       regs->a4 = *args++;
+       regs->b4 = *args++;
+       regs->a6 = *args++;
+       regs->b6 = *args++;
+       regs->a8 = *args++;
+       regs->a9 = *args;
 }
 
 #endif /* __ASM_C6X_SYSCALLS_H */
index 755bb11323d8feb92d12349bf444f151cd8acc29..1c72f04ff75da1a7f6918f00b14116a183a79313 100644 (file)
@@ -1,2 +1 @@
-generic-y += kvm_para.h
 generic-y += ucontext.h
index d637445737b78fd5c78c9994173a1e7c73eb3d1f..bda0a446c63ead759d9360d769fe68647a28f83d 100644 (file)
@@ -43,30 +43,20 @@ syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
 
 static inline void
 syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
-                     unsigned int i, unsigned int n, unsigned long *args)
+                     unsigned long *args)
 {
-       BUG_ON(i + n > 6);
-       if (i == 0) {
-               args[0] = regs->orig_a0;
-               args++;
-               i++;
-               n--;
-       }
-       memcpy(args, &regs->a1 + i * sizeof(regs->a1), n * sizeof(args[0]));
+       args[0] = regs->orig_a0;
+       args++;
+       memcpy(args, &regs->a1, 5 * sizeof(args[0]));
 }
 
 static inline void
 syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
-                     unsigned int i, unsigned int n, const unsigned long *args)
+                     const unsigned long *args)
 {
-       BUG_ON(i + n > 6);
-       if (i == 0) {
-               regs->orig_a0 = args[0];
-               args++;
-               i++;
-               n--;
-       }
-       memcpy(&regs->a1 + i * sizeof(regs->a1), args, n * sizeof(regs->a0));
+       regs->orig_a0 = args[0];
+       args++;
+       memcpy(&regs->a1, args, 5 * sizeof(regs->a1));
 }
 
 static inline int
index 3e7c8ecf151e13841d0c3e491f3927aa1586434c..e3dead402e5fbe94ebe53063968801c8f51360b5 100644 (file)
@@ -23,6 +23,7 @@ generic-y += irq_work.h
 generic-y += kdebug.h
 generic-y += kmap_types.h
 generic-y += kprobes.h
+generic-y += kvm_para.h
 generic-y += linkage.h
 generic-y += local.h
 generic-y += local64.h
index 924990401237126585ea8fd105e4b57e8f9e5b24..ddd483c6ca95c9df50e9ed7b8c820c9884afcbeb 100644 (file)
@@ -17,34 +17,14 @@ syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
 
 static inline void
 syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
-                     unsigned int i, unsigned int n, unsigned long *args)
+                     unsigned long *args)
 {
-       BUG_ON(i + n > 6);
-
-       while (n > 0) {
-               switch (i) {
-               case 0:
-                       *args++ = regs->er1;
-                       break;
-               case 1:
-                       *args++ = regs->er2;
-                       break;
-               case 2:
-                       *args++ = regs->er3;
-                       break;
-               case 3:
-                       *args++ = regs->er4;
-                       break;
-               case 4:
-                       *args++ = regs->er5;
-                       break;
-               case 5:
-                       *args++ = regs->er6;
-                       break;
-               }
-               i++;
-               n--;
-       }
+       *args++ = regs->er1;
+       *args++ = regs->er2;
+       *args++ = regs->er3;
+       *args++ = regs->er4;
+       *args++ = regs->er5;
+       *args   = regs->er6;
 }
 
 
index 755bb11323d8feb92d12349bf444f151cd8acc29..1c72f04ff75da1a7f6918f00b14116a183a79313 100644 (file)
@@ -1,2 +1 @@
-generic-y += kvm_para.h
 generic-y += ucontext.h
index b25fd42aa0f47372162decdff321f3cca2e1a4d8..d046e8ccdf786be5029237ad722d819de88d6124 100644 (file)
@@ -19,6 +19,7 @@ generic-y += irq_work.h
 generic-y += kdebug.h
 generic-y += kmap_types.h
 generic-y += kprobes.h
+generic-y += kvm_para.h
 generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
index 4af9c7b6f13af9490e4bee7b9f608c7d467cecb3..ae3a1e24fabd7193ff7d3dc142c4ca7d123f56e6 100644 (file)
@@ -37,10 +37,8 @@ static inline long syscall_get_nr(struct task_struct *task,
 
 static inline void syscall_get_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
-       BUG_ON(i + n > 6);
-       memcpy(args, &(&regs->r00)[i], n * sizeof(args[0]));
+       memcpy(args, &(&regs->r00)[0], 6 * sizeof(args[0]));
 }
 #endif
diff --git a/arch/hexagon/include/uapi/asm/kvm_para.h b/arch/hexagon/include/uapi/asm/kvm_para.h
deleted file mode 100644 (file)
index baacc49..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#include <asm-generic/kvm_para.h>
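The deleted uapi header was a one-line wrapper, which is exactly what the generic-y += kvm_para.h entries added to the Kbuild files throughout this series make the build system generate instead; the generated asm/kvm_para.h amounts to:

    #include <asm-generic/kvm_para.h>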
index 43e21fe3499c43451f2915ff1274afa76059f16b..11f191689c9e8445a77ede3555452fb8d008a3d7 100644 (file)
@@ -2,6 +2,7 @@ generated-y += syscall_table.h
 generic-y += compat.h
 generic-y += exec.h
 generic-y += irq_work.h
+generic-y += kvm_para.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
 generic-y += preempt.h
index 1d0b875fec44fc0fd8a70876e8ee6191f66f6005..0d9e7fab4a79fddcc63d24c30f002e73d2b91db0 100644 (file)
@@ -59,26 +59,19 @@ static inline void syscall_set_return_value(struct task_struct *task,
 }
 
 extern void ia64_syscall_get_set_arguments(struct task_struct *task,
-       struct pt_regs *regs, unsigned int i, unsigned int n,
-       unsigned long *args, int rw);
+       struct pt_regs *regs, unsigned long *args, int rw);
 static inline void syscall_get_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
-       BUG_ON(i + n > 6);
-
-       ia64_syscall_get_set_arguments(task, regs, i, n, args, 0);
+       ia64_syscall_get_set_arguments(task, regs, args, 0);
 }
 
 static inline void syscall_set_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
-       BUG_ON(i + n > 6);
-
-       ia64_syscall_get_set_arguments(task, regs, i, n, args, 1);
+       ia64_syscall_get_set_arguments(task, regs, args, 1);
 }
 
 static inline int syscall_get_arch(void)
index 20018cb883a90981565284ac1d5d9df51c38e987..62a9522af51e6651f560e06f8d3c3e2602b63f20 100644 (file)
@@ -1,2 +1 @@
 generated-y += unistd_64.h
-generic-y += kvm_para.h
index 6d50ede0ed691ca1899540722e65edb3cf896510..bf9c24d9ce84e66d1519ce7e5aa65330628d221b 100644 (file)
@@ -2179,12 +2179,11 @@ static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data)
 }
 
 void ia64_syscall_get_set_arguments(struct task_struct *task,
-       struct pt_regs *regs, unsigned int i, unsigned int n,
-       unsigned long *args, int rw)
+       struct pt_regs *regs, unsigned long *args, int rw)
 {
        struct syscall_get_set_args data = {
-               .i = i,
-               .n = n,
+               .i = 0,
+               .n = 6,
                .args = args,
                .regs = regs,
                .rw = rw,
index 95f8f631c4df08aebc71b25d878fc29e0f89a1ba..2c359d9e80f63fe44468c29b7a48bb4033a9c31a 100644 (file)
@@ -13,6 +13,7 @@ generic-y += irq_work.h
 generic-y += kdebug.h
 generic-y += kmap_types.h
 generic-y += kprobes.h
+generic-y += kvm_para.h
 generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
index 8a7ad40be463656854310b85dbba06d5f9b8e189..7417847dc438e5ff6aff14f04094a1323d6b933f 100644 (file)
@@ -1,2 +1 @@
 generated-y += unistd_32.h
-generic-y += kvm_para.h
index 791cc8d54d0a9eff79b5264a74b507fc3233699c..1a8285c3f693990c8a8f3f7d5d37d240b17c4a80 100644 (file)
@@ -17,6 +17,7 @@ generic-y += irq_work.h
 generic-y += kdebug.h
 generic-y += kmap_types.h
 generic-y += kprobes.h
+generic-y += kvm_para.h
 generic-y += linkage.h
 generic-y += local.h
 generic-y += local64.h
index 220decd605a4aded46a99b445e54bf27c4adc821..833d3a53dab30182b586dd364cd323d1db07835a 100644 (file)
@@ -82,18 +82,22 @@ static inline void microblaze_set_syscall_arg(struct pt_regs *regs,
 
 static inline void syscall_get_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
+       unsigned int i = 0;
+       unsigned int n = 6;
+
        while (n--)
                *args++ = microblaze_get_syscall_arg(regs, i++);
 }
 
 static inline void syscall_set_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         const unsigned long *args)
 {
+       unsigned int i = 0;
+       unsigned int n = 6;
+
        while (n--)
                microblaze_set_syscall_arg(regs, i++, *args++);
 }
index 3ce84fbb2678f2194de4e81504494045a00915ed..13f59631c576c6bcd4c50357269fa42c0525e62a 100644 (file)
@@ -1,3 +1,2 @@
 generated-y += unistd_32.h
-generic-y += kvm_para.h
 generic-y += ucontext.h
index f607888d24838be1c7d1ecd0fe7c20736b7d2ed1..184eb65a6ba71a5bea1e6b39cbe5d389a9764416 100644 (file)
@@ -1,6 +1,10 @@
 # require CONFIG_CPU_MIPS32_R2=y
 
 CONFIG_LEGACY_BOARD_OCELOT=y
+CONFIG_FIT_IMAGE_FDT_OCELOT=y
+
+CONFIG_BRIDGE=y
+CONFIG_GENERIC_PHY=y
 
 CONFIG_MTD=y
 CONFIG_MTD_CMDLINE_PARTS=y
@@ -19,6 +23,8 @@ CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_OF_PLATFORM=y
 
 CONFIG_NETDEVICES=y
+CONFIG_NET_SWITCHDEV=y
+CONFIG_NET_DSA=y
 CONFIG_MSCC_OCELOT_SWITCH=y
 CONFIG_MSCC_OCELOT_SWITCH_OCELOT=y
 CONFIG_MDIO_MSCC_MIIM=y
@@ -35,6 +41,8 @@ CONFIG_SPI_DESIGNWARE=y
 CONFIG_SPI_DW_MMIO=y
 CONFIG_SPI_SPIDEV=y
 
+CONFIG_PINCTRL_OCELOT=y
+
 CONFIG_GPIO_SYSFS=y
 
 CONFIG_POWER_RESET=y
index 6cf8ffb5367ec3fb725aac26c701d0ae5d81923c..a2b4748655df4d1466d037b971855c8046c589e8 100644 (file)
@@ -116,9 +116,10 @@ static inline void syscall_set_return_value(struct task_struct *task,
 
 static inline void syscall_get_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
+       unsigned int i = 0;
+       unsigned int n = 6;
        int ret;
 
        /* O32 ABI syscall() */
index 6e574c02e4c3b81137618c97fe9bc176c5a40d52..ea781b29f7f17291d90391c87a8a250772d17a37 100644 (file)
@@ -33,6 +33,7 @@
 #include <asm/processor.h>
 #include <asm/sigcontext.h>
 #include <linux/uaccess.h>
+#include <asm/irq_regs.h>
 
 static struct hard_trap_info {
        unsigned char tt;       /* Trap type code for MIPS R3xxx and R4xxx */
@@ -214,7 +215,7 @@ void kgdb_call_nmi_hook(void *ignored)
        old_fs = get_fs();
        set_fs(KERNEL_DS);
 
-       kgdb_nmicallback(raw_smp_processor_id(), NULL);
+       kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
 
        set_fs(old_fs);
 }
index 0057c910bc2f34de0f518c43d2e234c845db0da1..3a62f80958e170527a93f4058d60f5372d5773ee 100644 (file)
@@ -1419,7 +1419,7 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
 
                sd.nr = syscall;
                sd.arch = syscall_get_arch();
-               syscall_get_arguments(current, regs, 0, 6, args);
+               syscall_get_arguments(current, regs, args);
                for (i = 0; i < 6; i++)
                        sd.args[i] = args[i];
                sd.instruction_pointer = KSTK_EIP(current);
index 710a59764b01c164d3ffae92f18a394224bdc153..a32f843cdbe02299e34bf7f0897ad61f6e23dce5 100644 (file)
@@ -118,7 +118,6 @@ static void shutdown_bridge_irq(struct irq_data *d)
 {
        struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
        struct bridge_controller *bc;
-       int pin = hd->pin;
 
        if (!hd)
                return;
@@ -126,7 +125,7 @@ static void shutdown_bridge_irq(struct irq_data *d)
        disable_hub_irq(d);
 
        bc = hd->bc;
-       bridge_clr(bc, b_int_enable, (1 << pin));
+       bridge_clr(bc, b_int_enable, (1 << hd->pin));
        bridge_read(bc, b_wid_tflush);
 }
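The ip27 hunks fix a use-before-check: the removed declaration dereferenced hd before the NULL test a few lines later could run. The anti-pattern, distilled:

    struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
    int pin = hd->pin;      /* BUG: dereferences hd ...             */

    if (!hd)                /* ... before this check can catch NULL */
            return;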
 
index f7e5e86765fe8efe51283d7c350a6a4ab8b73863..671ebd357496c4e1608b240d2ba23c4d3d9efa42 100644 (file)
@@ -108,81 +108,41 @@ void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
  * syscall_get_arguments - extract system call parameter values
  * @task:      task of interest, must be blocked
  * @regs:      task_pt_regs() of @task
- * @i:         argument index [0,5]
- * @n:         number of arguments; n+i must be [1,6].
  * @args:      array filled with argument values
  *
- * Fetches @n arguments to the system call starting with the @i'th argument
- * (from 0 through 5).  Argument @i is stored in @args[0], and so on.
- * An arch inline version is probably optimal when @i and @n are constants.
+ * Fetches 6 arguments to the system call (from 0 through 5). The first
+ * argument is stored in @args[0], and so on.
  *
  * It's only valid to call this when @task is stopped for tracing on
  * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
- * It's invalid to call this with @i + @n > 6; we only support system calls
- * taking up to 6 arguments.
  */
 #define SYSCALL_MAX_ARGS 6
 void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
-                          unsigned int i, unsigned int n, unsigned long *args)
+                          unsigned long *args)
 {
-       if (n == 0)
-               return;
-       if (i + n > SYSCALL_MAX_ARGS) {
-               unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
-               unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
-               pr_warning("%s called with max args %d, handling only %d\n",
-                          __func__, i + n, SYSCALL_MAX_ARGS);
-               memset(args_bad, 0, n_bad * sizeof(args[0]));
-               memset(args_bad, 0, n_bad * sizeof(args[0]));
-       }
-
-       if (i == 0) {
-               args[0] = regs->orig_r0;
-               args++;
-               i++;
-               n--;
-       }
-
-       memcpy(args, &regs->uregs[0] + i, n * sizeof(args[0]));
+       args[0] = regs->orig_r0;
+       args++;
+       memcpy(args, &regs->uregs[0] + 1, 5 * sizeof(args[0]));
 }
 
 /**
  * syscall_set_arguments - change system call parameter value
  * @task:      task of interest, must be in system call entry tracing
  * @regs:      task_pt_regs() of @task
- * @i:         argument index [0,5]
- * @n:         number of arguments; n+i must be [1,6].
  * @args:      array of argument values to store
  *
- * Changes @n arguments to the system call starting with the @i'th argument.
- * Argument @i gets value @args[0], and so on.
- * An arch inline version is probably optimal when @i and @n are constants.
+ * Changes 6 arguments to the system call. The first argument gets value
+ * @args[0], and so on.
  *
  * It's only valid to call this when @task is stopped for tracing on
  * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
- * It's invalid to call this with @i + @n > 6; we only support system calls
- * taking up to 6 arguments.
  */
 void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
-                          unsigned int i, unsigned int n,
                           const unsigned long *args)
 {
-       if (n == 0)
-               return;
-
-       if (i + n > SYSCALL_MAX_ARGS) {
-               pr_warn("%s called with max args %d, handling only %d\n",
-                       __func__, i + n, SYSCALL_MAX_ARGS);
-               n = SYSCALL_MAX_ARGS - i;
-       }
-
-       if (i == 0) {
-               regs->orig_r0 = args[0];
-               args++;
-               i++;
-               n--;
-       }
+       regs->orig_r0 = args[0];
+       args++;
 
-       memcpy(&regs->uregs[0] + i, args, n * sizeof(args[0]));
+       memcpy(&regs->uregs[0] + 1, args, 5 * sizeof(args[0]));
 }
 #endif /* _ASM_NDS32_SYSCALL_H */
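Under the new prototype documented above, callers always supply a six-entry array, as the seccomp hunk in the MIPS ptrace code earlier in this diff shows. A minimal caller sketch:

    unsigned long args[6];

    syscall_get_arguments(task, regs, args);    /* fills args[0..5] */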
index 8fde4fa2c34f758df132e659eb69b4331c84172f..88a667d12aaa9cefafad5260f03e073fefeb1fed 100644 (file)
@@ -23,6 +23,7 @@ generic-y += irq_work.h
 generic-y += kdebug.h
 generic-y += kmap_types.h
 generic-y += kprobes.h
+generic-y += kvm_para.h
 generic-y += local.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
index 9de220854c4ad88f43ea579cbcf51c250cb6e688..d7624ed06efb6c9ea2e616c23cd20b030a53b1c8 100644 (file)
@@ -58,81 +58,25 @@ static inline void syscall_set_return_value(struct task_struct *task,
 }
 
 static inline void syscall_get_arguments(struct task_struct *task,
-       struct pt_regs *regs, unsigned int i, unsigned int n,
-       unsigned long *args)
+       struct pt_regs *regs, unsigned long *args)
 {
-       BUG_ON(i + n > 6);
-
-       switch (i) {
-       case 0:
-               if (!n--)
-                       break;
-               *args++ = regs->r4;
-       case 1:
-               if (!n--)
-                       break;
-               *args++ = regs->r5;
-       case 2:
-               if (!n--)
-                       break;
-               *args++ = regs->r6;
-       case 3:
-               if (!n--)
-                       break;
-               *args++ = regs->r7;
-       case 4:
-               if (!n--)
-                       break;
-               *args++ = regs->r8;
-       case 5:
-               if (!n--)
-                       break;
-               *args++ = regs->r9;
-       case 6:
-               if (!n--)
-                       break;
-       default:
-               BUG();
-       }
+       *args++ = regs->r4;
+       *args++ = regs->r5;
+       *args++ = regs->r6;
+       *args++ = regs->r7;
+       *args++ = regs->r8;
+       *args   = regs->r9;
 }
 
 static inline void syscall_set_arguments(struct task_struct *task,
-       struct pt_regs *regs, unsigned int i, unsigned int n,
-       const unsigned long *args)
+       struct pt_regs *regs, const unsigned long *args)
 {
-       BUG_ON(i + n > 6);
-
-       switch (i) {
-       case 0:
-               if (!n--)
-                       break;
-               regs->r4 = *args++;
-       case 1:
-               if (!n--)
-                       break;
-               regs->r5 = *args++;
-       case 2:
-               if (!n--)
-                       break;
-               regs->r6 = *args++;
-       case 3:
-               if (!n--)
-                       break;
-               regs->r7 = *args++;
-       case 4:
-               if (!n--)
-                       break;
-               regs->r8 = *args++;
-       case 5:
-               if (!n--)
-                       break;
-               regs->r9 = *args++;
-       case 6:
-               if (!n)
-                       break;
-       default:
-               BUG();
-       }
+       regs->r4 = *args++;
+       regs->r5 = *args++;
+       regs->r6 = *args++;
+       regs->r7 = *args++;
+       regs->r8 = *args++;
+       regs->r9 = *args;
 }
 
 #endif
index 755bb11323d8feb92d12349bf444f151cd8acc29..1c72f04ff75da1a7f6918f00b14116a183a79313 100644 (file)
@@ -1,2 +1 @@
-generic-y += kvm_para.h
 generic-y += ucontext.h
index 5a73e2956ac46953ac9e1cf2404c3d0996617602..22aa97136c0195ae2b687c0793c42e43f22888ec 100644 (file)
@@ -20,6 +20,7 @@ generic-y += irq_work.h
 generic-y += kdebug.h
 generic-y += kmap_types.h
 generic-y += kprobes.h
+generic-y += kvm_para.h
 generic-y += local.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
index 2db9f1cf0694c0f2c6bdaec77953f62fb4fe6372..b4ff07c1baed5d13c9abb0d025a1ece11ee78ad0 100644 (file)
@@ -56,20 +56,16 @@ syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
 
 static inline void
 syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
-                     unsigned int i, unsigned int n, unsigned long *args)
+                     unsigned long *args)
 {
-       BUG_ON(i + n > 6);
-
-       memcpy(args, &regs->gpr[3 + i], n * sizeof(args[0]));
+       memcpy(args, &regs->gpr[3], 6 * sizeof(args[0]));
 }
 
 static inline void
 syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
-                     unsigned int i, unsigned int n, const unsigned long *args)
+                     const unsigned long *args)
 {
-       BUG_ON(i + n > 6);
-
-       memcpy(&regs->gpr[3 + i], args, n * sizeof(args[0]));
+       memcpy(&regs->gpr[3], args, 6 * sizeof(args[0]));
 }
 
 static inline int syscall_get_arch(void)
index 755bb11323d8feb92d12349bf444f151cd8acc29..1c72f04ff75da1a7f6918f00b14116a183a79313 100644 (file)
@@ -1,2 +1 @@
-generic-y += kvm_para.h
 generic-y += ucontext.h
index 6f49e77d82a2a48673356355e38a411794ff51bb..9bcd0c903dbbef2aee61ade11844c5091e4bfd0c 100644 (file)
@@ -11,6 +11,7 @@ generic-y += irq_regs.h
 generic-y += irq_work.h
 generic-y += kdebug.h
 generic-y += kprobes.h
+generic-y += kvm_para.h
 generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
index 2a27b275ab092cc60b3d003250aaaf647aa9c916..9ff033d261ab381c9e356fea458d768170f9effc 100644 (file)
@@ -22,13 +22,14 @@ unsigned long profile_pc(struct pt_regs *);
 
 static inline unsigned long regs_return_value(struct pt_regs *regs)
 {
-       return regs->gr[20];
+       return regs->gr[28];
 }
 
 static inline void instruction_pointer_set(struct pt_regs *regs,
                                                unsigned long val)
 {
-        regs->iaoq[0] = val;
+       regs->iaoq[0] = val;
+       regs->iaoq[1] = val + 4;
 }
 
 /* Query offset/name of register from its name/offset */
index 8bff1a58c97f1b107dabf79e172f5ecb56c5db2d..62a6d477fae0197cdba9d62044e31104f1b05192 100644 (file)
@@ -18,29 +18,15 @@ static inline long syscall_get_nr(struct task_struct *tsk,
 }
 
 static inline void syscall_get_arguments(struct task_struct *tsk,
-                                        struct pt_regs *regs, unsigned int i,
-                                        unsigned int n, unsigned long *args)
+                                        struct pt_regs *regs,
+                                        unsigned long *args)
 {
-       BUG_ON(i);
-
-       switch (n) {
-       case 6:
-               args[5] = regs->gr[21];
-       case 5:
-               args[4] = regs->gr[22];
-       case 4:
-               args[3] = regs->gr[23];
-       case 3:
-               args[2] = regs->gr[24];
-       case 2:
-               args[1] = regs->gr[25];
-       case 1:
-               args[0] = regs->gr[26];
-       case 0:
-               break;
-       default:
-               BUG();
-       }
+       args[5] = regs->gr[21];
+       args[4] = regs->gr[22];
+       args[3] = regs->gr[23];
+       args[2] = regs->gr[24];
+       args[1] = regs->gr[25];
+       args[0] = regs->gr[26];
 }
 
 static inline long syscall_get_return_value(struct task_struct *task,
index 22fdbd08cdc8551777459efe75c49bbfa1e3f95a..2bd5b392277c2cf5c4a52f3d0b7d9aaed7382f44 100644 (file)
@@ -1,3 +1,2 @@
 generated-y += unistd_32.h
 generated-y += unistd_64.h
-generic-y += kvm_para.h
index eb39e7e380d7e27b24f6bae39ae0e6c3583511e3..841db71958cdb50dff183dd058a9b09a5ec81421 100644 (file)
@@ -210,12 +210,6 @@ void __cpuidle arch_cpu_idle(void)
 
 static int __init parisc_idle_init(void)
 {
-       const char *marker;
-
-       /* check QEMU/SeaBIOS marker in PAGE0 */
-       marker = (char *) &PAGE0->pad0;
-       running_on_qemu = (memcmp(marker, "SeaBIOS", 8) == 0);
-
        if (!running_on_qemu)
                cpu_idle_poll_ctrl(1);
 
index 15dd9e21be7eac6d1fcf37d67f72de9b227bfa75..d908058d05c10bf4880e361070c30a42b668fd7e 100644 (file)
@@ -397,6 +397,9 @@ void __init start_parisc(void)
        int ret, cpunum;
        struct pdc_coproc_cfg coproc_cfg;
 
+       /* check QEMU/SeaBIOS marker in PAGE0 */
+       running_on_qemu = (memcmp(&PAGE0->pad0, "SeaBIOS", 8) == 0);
+
        cpunum = smp_processor_id();
 
        init_cpu_topology();
index 598cdcdd13553dea4a80a9b72196dbee8987cd61..8ddd4a91bdc1e2fe9a2e4a617b32cc0e6e15e572 100644 (file)
@@ -352,7 +352,7 @@ static inline bool strict_kernel_rwx_enabled(void)
 #if defined(CONFIG_SPARSEMEM_VMEMMAP) && defined(CONFIG_SPARSEMEM_EXTREME) &&  \
        defined (CONFIG_PPC_64K_PAGES)
 #define MAX_PHYSMEM_BITS        51
-#elif defined(CONFIG_SPARSEMEM)
+#elif defined(CONFIG_PPC64)
 #define MAX_PHYSMEM_BITS        46
 #endif
 
index 1a0e7a8b1c811cf5d089c5ac68eb96d189ad702d..1243045bad2d633d4bd2df3d3e086bd2988987d0 100644 (file)
@@ -65,22 +65,20 @@ static inline void syscall_set_return_value(struct task_struct *task,
 
 static inline void syscall_get_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
        unsigned long val, mask = -1UL;
-
-       BUG_ON(i + n > 6);
+       unsigned int n = 6;
 
 #ifdef CONFIG_COMPAT
        if (test_tsk_thread_flag(task, TIF_32BIT))
                mask = 0xffffffff;
 #endif
        while (n--) {
-               if (n == 0 && i == 0)
+               if (n == 0)
                        val = regs->orig_gpr3;
                else
-                       val = regs->gpr[3 + i + n];
+                       val = regs->gpr[3 + n];
 
                args[n] = val & mask;
        }
@@ -88,15 +86,12 @@ static inline void syscall_get_arguments(struct task_struct *task,
 
 static inline void syscall_set_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         const unsigned long *args)
 {
-       BUG_ON(i + n > 6);
-       memcpy(&regs->gpr[3 + i], args, n * sizeof(args[0]));
+       memcpy(&regs->gpr[3], args, 6 * sizeof(args[0]));
 
        /* Also copy the first argument into orig_gpr3 */
-       if (i == 0 && n > 0)
-               regs->orig_gpr3 = args[0];
+       regs->orig_gpr3 = args[0];
 }
 
 static inline int syscall_get_arch(void)
index a5b8fbae56a03b491f0982562f3d590cff16ca5f..9481a117e24255173231ac687c9e99b730bff420 100644 (file)
@@ -656,11 +656,17 @@ EXC_COMMON_BEGIN(data_access_slb_common)
        ld      r4,PACA_EXSLB+EX_DAR(r13)
        std     r4,_DAR(r1)
        addi    r3,r1,STACK_FRAME_OVERHEAD
+BEGIN_MMU_FTR_SECTION
+       /* HPT case, do SLB fault */
        bl      do_slb_fault
        cmpdi   r3,0
        bne-    1f
        b       fast_exception_return
 1:     /* Error case */
+MMU_FTR_SECTION_ELSE
+       /* Radix case, access is outside page table range */
+       li      r3,-EFAULT
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
        std     r3,RESULT(r1)
        bl      save_nvgprs
        RECONCILE_IRQ_STATE(r10, r11)
@@ -705,11 +711,17 @@ EXC_COMMON_BEGIN(instruction_access_slb_common)
        EXCEPTION_PROLOG_COMMON(0x480, PACA_EXSLB)
        ld      r4,_NIP(r1)
        addi    r3,r1,STACK_FRAME_OVERHEAD
+BEGIN_MMU_FTR_SECTION
+       /* HPT case, do SLB fault */
        bl      do_slb_fault
        cmpdi   r3,0
        bne-    1f
        b       fast_exception_return
 1:     /* Error case */
+MMU_FTR_SECTION_ELSE
+       /* Radix case, access is outside page table range */
+       li      r3,-EFAULT
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
        std     r3,RESULT(r1)
        bl      save_nvgprs
        RECONCILE_IRQ_STATE(r10, r11)
index 48051c8977c5603a1ac9f8b730c0283ab04497d8..e25b615e9f9e642d34e9387aac7db652131a466f 100644 (file)
@@ -851,10 +851,6 @@ __secondary_start:
        tophys(r4,r2)
        addi    r4,r4,THREAD    /* phys address of our thread_struct */
        mtspr   SPRN_SPRG_THREAD,r4
-#ifdef CONFIG_PPC_RTAS
-       li      r3,0
-       stw     r3, RTAS_SP(r4)         /* 0 => not in RTAS */
-#endif
        lis     r4, (swapper_pg_dir - PAGE_OFFSET)@h
        ori     r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
        mtspr   SPRN_SPRG_PGDIR, r4
@@ -941,10 +937,6 @@ start_here:
        tophys(r4,r2)
        addi    r4,r4,THREAD    /* init task's THREAD */
        mtspr   SPRN_SPRG_THREAD,r4
-#ifdef CONFIG_PPC_RTAS
-       li      r3,0
-       stw     r3, RTAS_SP(r4)         /* 0 => not in RTAS */
-#endif
        lis     r4, (swapper_pg_dir - PAGE_OFFSET)@h
        ori     r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
        mtspr   SPRN_SPRG_PGDIR, r4
index 683b5b3805bd17493d97c261afc19279ac76b69f..cd381e2291dfeb38a569fed214778838cef42a2e 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/kvm_host.h>
 #include <linux/init.h>
 #include <linux/export.h>
+#include <linux/kmemleak.h>
 #include <linux/kvm_para.h>
 #include <linux/slab.h>
 #include <linux/of.h>
@@ -712,6 +713,12 @@ static void kvm_use_magic_page(void)
 
 static __init void kvm_free_tmp(void)
 {
+       /*
+        * Inform kmemleak about the hole in the .bss section since the
+        * corresponding pages will be unmapped with DEBUG_PAGEALLOC=y.
+        */
+       kmemleak_free_part(&kvm_tmp[kvm_tmp_index],
+                          ARRAY_SIZE(kvm_tmp) - kvm_tmp_index);
        free_reserved_area(&kvm_tmp[kvm_tmp_index],
                           &kvm_tmp[ARRAY_SIZE(kvm_tmp)], -1, NULL);
 }
index 1e0bc5955a400601b106949f14c7a0ca64d1a6a6..afd516b572f8637447315ec882c08189bcf2fb4d 100644 (file)
@@ -98,7 +98,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
         * can be used, r7 contains NSEC_PER_SEC.
         */
 
-       lwz     r5,WTOM_CLOCK_SEC(r9)
+       lwz     r5,(WTOM_CLOCK_SEC+LOPART)(r9)
        lwz     r6,WTOM_CLOCK_NSEC(r9)
 
        /* We now have our offset in r5,r6. We create a fake dependency
index 844d8e774492e65929168bfff4d0655fa50dda74..b7f6f6e0b6e801c6cf0fbb1d11d5c0d53014fb4d 100644 (file)
@@ -215,11 +215,20 @@ _GLOBAL_TOC(memcmp)
        beq     .Lzero
 
 .Lcmp_rest_lt8bytes:
-       /* Here we have only less than 8 bytes to compare with. at least s1
-        * Address is aligned with 8 bytes.
-        * The next double words are load and shift right with appropriate
-        * bits.
+       /*
+        * Here we have less than 8 bytes to compare. At least s1 is aligned to
+        * 8 bytes, but s2 may not be. We must make sure s2 + 7 doesn't cross a
+        * page boundary, otherwise we might read past the end of the buffer and
+        * trigger a page fault. We use 4K as the conservative minimum page
+        * size. If we detect that case we go to the byte-by-byte loop.
+        *
+        * Otherwise the next double word is loaded from s1 and s2, and shifted
+        * right to compare the appropriate bits.
         */
+       clrldi  r6,r4,(64-12)   // r6 = r4 & 0xfff
+       cmpdi   r6,0xff8
+       bgt     .Lshort
+
        subfic  r6,r5,8
        slwi    r6,r6,3
        LD      rA,0,r3
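In C terms, the guard added above checks whether an 8-byte load at s2 (held in r4) would cross a 4 KiB page boundary; 0xff8 is the last in-page offset from which eight bytes can still be read:

    if (((unsigned long)s2 & 0xfff) > 0xff8)    /* clrldi / cmpdi / bgt */
            goto byte_by_byte;                  /* .Lshort in the asm   */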
index 6ed22127391b6d0a7789bb363476452bf0991a65..921f12182f3e01a850372fd51b0a88a5bede296c 100644 (file)
@@ -77,18 +77,27 @@ static u32 cpu_to_drc_index(int cpu)
 
                ret = drc.drc_index_start + (thread_index * drc.sequential_inc);
        } else {
-               const __be32 *indexes;
-
-               indexes = of_get_property(dn, "ibm,drc-indexes", NULL);
-               if (indexes == NULL)
-                       goto err_of_node_put;
+               u32 nr_drc_indexes, thread_drc_index;
 
                /*
-                * The first element indexes[0] is the number of drc_indexes
-                * returned in the list.  Hence thread_index+1 will get the
-                * drc_index corresponding to core number thread_index.
+                * The first element of ibm,drc-indexes array is the
+                * number of drc_indexes returned in the list.  Hence
+                * thread_index+1 will get the drc_index corresponding
+                * to core number thread_index.
                 */
-               ret = indexes[thread_index + 1];
+               rc = of_property_read_u32_index(dn, "ibm,drc-indexes",
+                                               0, &nr_drc_indexes);
+               if (rc)
+                       goto err_of_node_put;
+
+               WARN_ON_ONCE(thread_index > nr_drc_indexes);
+               rc = of_property_read_u32_index(dn, "ibm,drc-indexes",
+                                               thread_index + 1,
+                                               &thread_drc_index);
+               if (rc)
+                       goto err_of_node_put;
+
+               ret = thread_drc_index;
        }
 
        rc = 0;
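
A sketch of the lookup pattern the hunk switches to (`dn` mirrors the diff, `i` stands for thread_index, error handling trimmed): element 0 of the property holds the entry count, so logical entry i lives at element i + 1.

	u32 nr_entries, value;
	int rc;

	/* element 0 of "ibm,drc-indexes" is the number of entries */
	rc = of_property_read_u32_index(dn, "ibm,drc-indexes", 0, &nr_entries);
	if (rc)
		return rc;
	/* the entries follow the count, hence the off-by-one index */
	rc = of_property_read_u32_index(dn, "ibm,drc-indexes", i + 1, &value);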
index d97d52772789b70187c5a2336c6fdee76fc48154..452dcfd7e5dd15083715eceef980642fdfb83efd 100644 (file)
@@ -550,6 +550,7 @@ static void pseries_print_mce_info(struct pt_regs *regs,
                "UE",
                "SLB",
                "ERAT",
+               "Unknown",
                "TLB",
                "D-Cache",
                "Unknown",
diff --git a/arch/riscv/configs/rv32_defconfig b/arch/riscv/configs/rv32_defconfig
new file mode 100644 (file)
index 0000000..1a911ed
--- /dev/null
@@ -0,0 +1,84 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_CGROUP_BPF=y
+CONFIG_NAMESPACES=y
+CONFIG_USER_NS=y
+CONFIG_CHECKPOINT_RESTORE=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_ARCH_RV32I=y
+CONFIG_SMP=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_NETLINK_DIAG=y
+CONFIG_PCI=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_PCIE_XILINX=y
+CONFIG_DEVTMPFS=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_VIRTIO_BLK=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_NETDEVICES=y
+CONFIG_VIRTIO_NET=y
+CONFIG_MACB=y
+CONFIG_E1000E=y
+CONFIG_R8169=y
+CONFIG_MICROSEMI_PHY=y
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
+CONFIG_HVC_RISCV_SBI=y
+# CONFIG_PTP_1588_CLOCK is not set
+CONFIG_DRM=y
+CONFIG_DRM_RADEON=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_USB=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_PLATFORM=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_UAS=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_SIFIVE_PLIC=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_ROOT_NFS=y
+CONFIG_CRYPTO_USER_API_HASH=y
+CONFIG_CRYPTO_DEV_VIRTIO=y
+CONFIG_PRINTK_TIME=y
+# CONFIG_RCU_TRACE is not set
index 57afe604b495bef44894b5088517c103376684d4..c207f6634b91c4ecc8f60b759c82056dd5624ed4 100644 (file)
@@ -26,7 +26,7 @@ enum fixed_addresses {
 };
 
 #define FIXADDR_SIZE           (__end_of_fixed_addresses * PAGE_SIZE)
-#define FIXADDR_TOP            (PAGE_OFFSET)
+#define FIXADDR_TOP            (VMALLOC_START)
 #define FIXADDR_START          (FIXADDR_TOP - FIXADDR_SIZE)
 
 #define FIXMAP_PAGE_IO         PAGE_KERNEL
index bba3da6ef1572f41db64e59ca203ae32b9139180..a3d5273ded7c6d0782356f01abf7d5cdca753bc5 100644 (file)
@@ -72,32 +72,20 @@ static inline void syscall_set_return_value(struct task_struct *task,
 
 static inline void syscall_get_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
-       BUG_ON(i + n > 6);
-       if (i == 0) {
-               args[0] = regs->orig_a0;
-               args++;
-               i++;
-               n--;
-       }
-       memcpy(args, &regs->a1 + i * sizeof(regs->a1), n * sizeof(args[0]));
+       args[0] = regs->orig_a0;
+       args++;
+       memcpy(args, &regs->a1, 5 * sizeof(args[0]));
 }
 
 static inline void syscall_set_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         const unsigned long *args)
 {
-       BUG_ON(i + n > 6);
-        if (i == 0) {
-                regs->orig_a0 = args[0];
-                args++;
-                i++;
-                n--;
-        }
-       memcpy(&regs->a1 + i * sizeof(regs->a1), args, n * sizeof(regs->a0));
+       regs->orig_a0 = args[0];
+       args++;
+       memcpy(&regs->a1, args, 5 * sizeof(regs->a1));
 }
 
 static inline int syscall_get_arch(void)
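
Besides dropping the i/n interface, the rewrite above fixes a latent bug in the removed code: `&regs->a1 + i * sizeof(regs->a1)` scales the offset twice, because pointer arithmetic on `unsigned long *` already advances in element-sized steps. A sketch of the distinction (illustrative only):

	unsigned long *base  = &regs->a1;
	unsigned long *ok    = base + i;                 /* advances i elements */
	unsigned long *buggy = base + i * sizeof(*base); /* advances 8*i elements
	                                                    on RV64, walking far
	                                                    past pt_regs for i > 0 */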
index a00168b980d2e6ca265ae0424045508275fbbe3f..fb53a8089e769473434493d59bc408079dcbb519 100644 (file)
@@ -300,7 +300,7 @@ do {                                                                \
                "       .balign 4\n"                            \
                "4:\n"                                          \
                "       li %0, %6\n"                            \
-               "       jump 2b, %1\n"                          \
+               "       jump 3b, %1\n"                          \
                "       .previous\n"                            \
                "       .section __ex_table,\"a\"\n"            \
                "       .balign " RISCV_SZPTR "\n"                      \
index f13f7f276639d504679034a36c53edc15f25dfe1..598568168d3511406fea38b23360c7e28a50a41f 100644 (file)
@@ -4,7 +4,6 @@
 
 ifdef CONFIG_FTRACE
 CFLAGS_REMOVE_ftrace.o = -pg
-CFLAGS_REMOVE_setup.o = -pg
 endif
 
 extra-y += head.o
@@ -29,8 +28,6 @@ obj-y += vdso.o
 obj-y  += cacheinfo.o
 obj-y  += vdso/
 
-CFLAGS_setup.o := -mcmodel=medany
-
 obj-$(CONFIG_FPU)              += fpu.o
 obj-$(CONFIG_SMP)              += smpboot.o
 obj-$(CONFIG_SMP)              += smp.o
index 7dd308129b40f1862ab04dc1e12c790bf7c111fe..2872edce894d1e0b79d58a4ed735649dd8261408 100644 (file)
@@ -141,7 +141,7 @@ static int apply_r_riscv_hi20_rela(struct module *me, u32 *location,
 {
        s32 hi20;
 
-       if (IS_ENABLED(CMODEL_MEDLOW)) {
+       if (IS_ENABLED(CONFIG_CMODEL_MEDLOW)) {
                pr_err(
                  "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
                  me->name, (long long)v, location);
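
The bug fixed here is a classic Kconfig pitfall: IS_ENABLED() must be given the full CONFIG_-prefixed macro. A hedged sketch of why the old test compiled yet was always false:

	#include <linux/kconfig.h>

	/*
	 * IS_ENABLED(opt) evaluates to 1 only when `opt` is #defined to 1
	 * (built-in) or when opt##_MODULE is defined. The bare symbol
	 * CMODEL_MEDLOW is never defined by Kconfig, so the old check was
	 * constant-false and the pr_err() path above was dead code.
	 */
	static bool built_with_medlow(void)
	{
		return IS_ENABLED(CONFIG_CMODEL_MEDLOW);
	}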
index ecb654f6a79ef105931a51950d520c1af845edff..540a331d1376922c62ba17bf0d9c786714d89948 100644 (file)
@@ -48,14 +48,6 @@ struct screen_info screen_info = {
 };
 #endif
 
-unsigned long va_pa_offset;
-EXPORT_SYMBOL(va_pa_offset);
-unsigned long pfn_base;
-EXPORT_SYMBOL(pfn_base);
-
-unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
-EXPORT_SYMBOL(empty_zero_page);
-
 /* The lucky hart to first increment this variable will boot the other cores */
 atomic_t hart_lottery;
 unsigned long boot_cpu_hartid;
index eb22ab49b3e008ec4ab677778302d5dbbea358b1..b68aac7018031cd5afe4ebb293051cbcc814969e 100644 (file)
@@ -1,3 +1,9 @@
+
+CFLAGS_init.o := -mcmodel=medany
+ifdef CONFIG_FTRACE
+CFLAGS_REMOVE_init.o = -pg
+endif
+
 obj-y += init.o
 obj-y += fault.o
 obj-y += extable.o
index b379a75ac6a6778052b9161612357ba5df620648..bc7b77e34d0920f2190c7e8c4edd18658c526703 100644 (file)
 #include <asm/pgtable.h>
 #include <asm/io.h>
 
+unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
+                                                       __page_aligned_bss;
+EXPORT_SYMBOL(empty_zero_page);
+
 static void __init zone_sizes_init(void)
 {
        unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };
@@ -117,6 +121,14 @@ void __init setup_bootmem(void)
                         */
                        memblock_reserve(reg->base, vmlinux_end - reg->base);
                        mem_size = min(reg->size, (phys_addr_t)-PAGE_OFFSET);
+
+                       /*
+                        * Remove the memblock range from the end of the
+                        * usable area to the end of this region.
+                        */
+                       if (reg->base + mem_size < end)
+                               memblock_remove(reg->base + mem_size,
+                                               end - reg->base - mem_size);
                }
        }
        BUG_ON(mem_size == 0);
@@ -143,6 +155,11 @@ void __init setup_bootmem(void)
        }
 }
 
+unsigned long va_pa_offset;
+EXPORT_SYMBOL(va_pa_offset);
+unsigned long pfn_base;
+EXPORT_SYMBOL(pfn_base);
+
 pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
 pgd_t trampoline_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
 
@@ -172,6 +189,25 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
        }
 }
 
+/*
+ * setup_vm() is called from head.S with MMU-off.
+ *
+ * Following requirements should be honoured for setup_vm() to work
+ * correctly:
+ * 1) It should use PC-relative addressing for accessing kernel symbols.
+ *    To achieve this we always use GCC cmodel=medany.
+ * 2) The compiler instrumentation for FTRACE will not work for setup_vm(),
+ *    so it must be disabled when FTRACE is enabled.
+ *
+ * Currently, the above requirements are honoured by using custom CFLAGS
+ * for init.o in mm/Makefile.
+ */
+
+#ifndef __riscv_cmodel_medany
+#error "setup_vm() is called from head.S before relocate so it should "
+       "not use absolute addressing."
+#endif
+
 asmlinkage void __init setup_vm(void)
 {
        extern char _start;
index 1a6a7092d94209d4ee330003cfd3d2ccf713b916..e94a0a28b5ebe22b944ea73b1ac48bdcf52d9e63 100644 (file)
@@ -360,4 +360,15 @@ static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
        return reg1;
 }
 
+/*
+ * Interface to tell the AP bus code that a configuration
+ * change has happened. The bus code should at least do
+ * an AP bus resource rescan.
+ */
+#if IS_ENABLED(CONFIG_ZCRYPT)
+void ap_bus_cfg_chg(void);
+#else
+static inline void ap_bus_cfg_chg(void) {}
+#endif
+
 #endif /* _ASM_S390_AP_H_ */
index 7d22a474a040ddd3d0e76c84075db6ab17bb2263..f74639a05f0ffc33f638c264af58c48933e36139 100644 (file)
@@ -252,11 +252,14 @@ do {                                                              \
 
 /*
  * Cache aliasing on the latest machines calls for a mapping granularity
- * of 512KB. For 64-bit processes use a 512KB alignment and a randomization
- * of up to 1GB. For 31-bit processes the virtual address space is limited,
- * use no alignment and limit the randomization to 8MB.
+ * of 512KB for the anonymous mapping base. For 64-bit processes use a
+ * 512KB alignment and a randomization of up to 1GB. For 31-bit processes
+ * the virtual address space is limited, use no alignment and limit the
+ * randomization to 8MB.
+ * For the additional randomization of the program break, use 32MB for
+ * 64-bit and 8MB for 31-bit.
  */
-#define BRK_RND_MASK   (is_compat_task() ? 0x7ffUL : 0x3ffffUL)
+#define BRK_RND_MASK   (is_compat_task() ? 0x7ffUL : 0x1fffUL)
 #define MMAP_RND_MASK  (is_compat_task() ? 0x7ffUL : 0x3ff80UL)
 #define MMAP_ALIGN_MASK        (is_compat_task() ? 0 : 0x7fUL)
 #define STACK_RND_MASK MMAP_RND_MASK
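
As a quick sanity check of the masks against the comment (assuming the usual 4K pages, PAGE_SHIFT = 12, and a randomization range of (mask + 1) << PAGE_SHIFT):

	(0x1fffUL  + 1) << 12  ==   32 MB  /* 64-bit brk randomization   */
	(0x7ffUL   + 1) << 12  ==    8 MB  /* 31-bit brk and mmap        */
	(0x3ff80UL + 1) << 12  ==   ~1 GB  /* 64-bit mmap randomization  */
	(0x7fUL    + 1) << 12  ==  512 KB  /* 64-bit mmap alignment step */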
index cc0947e08b6ffef09419a52eb04f817535016127..5b9f10b1e55dec03c2878a6ab510cb0d128002e5 100644 (file)
@@ -91,52 +91,53 @@ struct lowcore {
        __u64   hardirq_timer;                  /* 0x02e8 */
        __u64   softirq_timer;                  /* 0x02f0 */
        __u64   steal_timer;                    /* 0x02f8 */
-       __u64   last_update_timer;              /* 0x0300 */
-       __u64   last_update_clock;              /* 0x0308 */
-       __u64   int_clock;                      /* 0x0310 */
-       __u64   mcck_clock;                     /* 0x0318 */
-       __u64   clock_comparator;               /* 0x0320 */
-       __u64   boot_clock[2];                  /* 0x0328 */
+       __u64   avg_steal_timer;                /* 0x0300 */
+       __u64   last_update_timer;              /* 0x0308 */
+       __u64   last_update_clock;              /* 0x0310 */
+       __u64   int_clock;                      /* 0x0318 */
+       __u64   mcck_clock;                     /* 0x0320 */
+       __u64   clock_comparator;               /* 0x0328 */
+       __u64   boot_clock[2];                  /* 0x0330 */
 
        /* Current process. */
-       __u64   current_task;                   /* 0x0338 */
-       __u64   kernel_stack;                   /* 0x0340 */
+       __u64   current_task;                   /* 0x0340 */
+       __u64   kernel_stack;                   /* 0x0348 */
 
        /* Interrupt, DAT-off and restartstack. */
-       __u64   async_stack;                    /* 0x0348 */
-       __u64   nodat_stack;                    /* 0x0350 */
-       __u64   restart_stack;                  /* 0x0358 */
+       __u64   async_stack;                    /* 0x0350 */
+       __u64   nodat_stack;                    /* 0x0358 */
+       __u64   restart_stack;                  /* 0x0360 */
 
        /* Restart function and parameter. */
-       __u64   restart_fn;                     /* 0x0360 */
-       __u64   restart_data;                   /* 0x0368 */
-       __u64   restart_source;                 /* 0x0370 */
+       __u64   restart_fn;                     /* 0x0368 */
+       __u64   restart_data;                   /* 0x0370 */
+       __u64   restart_source;                 /* 0x0378 */
 
        /* Address space pointer. */
-       __u64   kernel_asce;                    /* 0x0378 */
-       __u64   user_asce;                      /* 0x0380 */
-       __u64   vdso_asce;                      /* 0x0388 */
+       __u64   kernel_asce;                    /* 0x0380 */
+       __u64   user_asce;                      /* 0x0388 */
+       __u64   vdso_asce;                      /* 0x0390 */
 
        /*
         * The lpp and current_pid fields form a
         * 64-bit value that is set as program
         * parameter with the LPP instruction.
         */
-       __u32   lpp;                            /* 0x0390 */
-       __u32   current_pid;                    /* 0x0394 */
+       __u32   lpp;                            /* 0x0398 */
+       __u32   current_pid;                    /* 0x039c */
 
        /* SMP info area */
-       __u32   cpu_nr;                         /* 0x0398 */
-       __u32   softirq_pending;                /* 0x039c */
-       __u32   preempt_count;                  /* 0x03a0 */
-       __u32   spinlock_lockval;               /* 0x03a4 */
-       __u32   spinlock_index;                 /* 0x03a8 */
-       __u32   fpu_flags;                      /* 0x03ac */
-       __u64   percpu_offset;                  /* 0x03b0 */
-       __u64   vdso_per_cpu_data;              /* 0x03b8 */
-       __u64   machine_flags;                  /* 0x03c0 */
-       __u64   gmap;                           /* 0x03c8 */
-       __u8    pad_0x03d0[0x0400-0x03d0];      /* 0x03d0 */
+       __u32   cpu_nr;                         /* 0x03a0 */
+       __u32   softirq_pending;                /* 0x03a4 */
+       __u32   preempt_count;                  /* 0x03a8 */
+       __u32   spinlock_lockval;               /* 0x03ac */
+       __u32   spinlock_index;                 /* 0x03b0 */
+       __u32   fpu_flags;                      /* 0x03b4 */
+       __u64   percpu_offset;                  /* 0x03b8 */
+       __u64   vdso_per_cpu_data;              /* 0x03c0 */
+       __u64   machine_flags;                  /* 0x03c8 */
+       __u64   gmap;                           /* 0x03d0 */
+       __u8    pad_0x03d8[0x0400-0x03d8];      /* 0x03d8 */
 
        /* br %r1 trampoline */
        __u16   br_r1_trampoline;               /* 0x0400 */
index 96f9a9151fde02fc6f76633d76d292f47512d364..59c3e91f2cdb6636023eefc4b3a1dd7507f3b2f6 100644 (file)
@@ -56,40 +56,32 @@ static inline void syscall_set_return_value(struct task_struct *task,
 
 static inline void syscall_get_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
        unsigned long mask = -1UL;
+       unsigned int n = 6;
 
-       /*
-        * No arguments for this syscall, there's nothing to do.
-        */
-       if (!n)
-               return;
-
-       BUG_ON(i + n > 6);
 #ifdef CONFIG_COMPAT
        if (test_tsk_thread_flag(task, TIF_31BIT))
                mask = 0xffffffff;
 #endif
        while (n-- > 0)
-               if (i + n > 0)
-                       args[n] = regs->gprs[2 + i + n] & mask;
-       if (i == 0)
-               args[0] = regs->orig_gpr2 & mask;
+               if (n > 0)
+                       args[n] = regs->gprs[2 + n] & mask;
+
+       args[0] = regs->orig_gpr2 & mask;
 }
 
 static inline void syscall_set_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         const unsigned long *args)
 {
-       BUG_ON(i + n > 6);
+       unsigned int n = 6;
+
        while (n-- > 0)
-               if (i + n > 0)
-                       regs->gprs[2 + i + n] = args[n];
-       if (i == 0)
-               regs->orig_gpr2 = args[0];
+               if (n > 0)
+                       regs->gprs[2 + n] = args[n];
+       regs->orig_gpr2 = args[0];
 }
 
 static inline int syscall_get_arch(void)
index c6fad208c2fa5a8ffaad40d554c7597097d3e4fa..b6854812d2ed56f11cbd03865c16b26290518611 100644 (file)
@@ -196,23 +196,30 @@ static void cf_diag_perf_event_destroy(struct perf_event *event)
  */
 static int __hw_perf_event_init(struct perf_event *event)
 {
-       struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
        struct perf_event_attr *attr = &event->attr;
+       struct cpu_cf_events *cpuhw;
        enum cpumf_ctr_set i;
        int err = 0;
 
-       debug_sprintf_event(cf_diag_dbg, 5,
-                           "%s event %p cpu %d authorized %#x\n", __func__,
-                           event, event->cpu, cpuhw->info.auth_ctl);
+       debug_sprintf_event(cf_diag_dbg, 5, "%s event %p cpu %d\n", __func__,
+                           event, event->cpu);
 
        event->hw.config = attr->config;
        event->hw.config_base = 0;
-       local64_set(&event->count, 0);
 
-       /* Add all authorized counter sets to config_base */
+       /* Add all authorized counter sets to config_base. The
+        * hardware init function is either called per-CPU or just once
+        * for all CPUs (event->cpu == -1). This depends on whether
+        * counting is started for all CPUs or on a per-workload basis,
+        * where the perf event moves from one CPU to another.
+        * Checking the authorization on any CPU is fine as the hardware
+        * applies the same authorization settings to all CPUs.
+        */
+       cpuhw = &get_cpu_var(cpu_cf_events);
        for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i)
                if (cpuhw->info.auth_ctl & cpumf_ctr_ctl[i])
                        event->hw.config_base |= cpumf_ctr_ctl[i];
+       put_cpu_var(cpu_cf_events);
 
        /* No authorized counter sets, nothing to count/sample */
        if (!event->hw.config_base) {
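
A minimal sketch of the preemption-safe per-CPU access pattern the hunk introduces (illustrative names): get_cpu_var() disables preemption until the matching put_cpu_var(), so the pointer cannot go stale mid-read, and since the authorization bits are the same on every CPU it does not matter which CPU answers.

	#include <linux/percpu.h>

	struct my_events {
		u64 auth_ctl;
	};
	static DEFINE_PER_CPU(struct my_events, my_events);

	static u64 read_auth_ctl(void)
	{
		/* get_cpu_var() disables preemption ... */
		struct my_events *e = &get_cpu_var(my_events);
		u64 auth = e->auth_ctl;

		put_cpu_var(my_events);	/* ... put_cpu_var() re-enables it */
		return auth;
	}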
index 3fe1c77c361b98a9a4443bf1a2941f486d024030..bd197baf1dc337f018af35eeb19635b1c95998b7 100644 (file)
@@ -266,7 +266,8 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
        lc->percpu_offset = __per_cpu_offset[cpu];
        lc->kernel_asce = S390_lowcore.kernel_asce;
        lc->machine_flags = S390_lowcore.machine_flags;
-       lc->user_timer = lc->system_timer = lc->steal_timer = 0;
+       lc->user_timer = lc->system_timer =
+               lc->steal_timer = lc->avg_steal_timer = 0;
        __ctl_store(lc->cregs_save_area, 0, 15);
        save_access_regs((unsigned int *) lc->access_regs_save_area);
        memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
index 98f850e00008e99a1e64e8f20a74bbaaf4910636..a69a0911ed0e82720b10b124d0153681f2c821ea 100644 (file)
@@ -124,7 +124,7 @@ static void account_system_index_scaled(struct task_struct *p, u64 cputime,
  */
 static int do_account_vtime(struct task_struct *tsk)
 {
-       u64 timer, clock, user, guest, system, hardirq, softirq, steal;
+       u64 timer, clock, user, guest, system, hardirq, softirq;
 
        timer = S390_lowcore.last_update_timer;
        clock = S390_lowcore.last_update_clock;
@@ -182,12 +182,6 @@ static int do_account_vtime(struct task_struct *tsk)
        if (softirq)
                account_system_index_scaled(tsk, softirq, CPUTIME_SOFTIRQ);
 
-       steal = S390_lowcore.steal_timer;
-       if ((s64) steal > 0) {
-               S390_lowcore.steal_timer = 0;
-               account_steal_time(cputime_to_nsecs(steal));
-       }
-
        return virt_timer_forward(user + guest + system + hardirq + softirq);
 }
 
@@ -213,8 +207,19 @@ void vtime_task_switch(struct task_struct *prev)
  */
 void vtime_flush(struct task_struct *tsk)
 {
+       u64 steal, avg_steal;
+
        if (do_account_vtime(tsk))
                virt_timer_expire();
+
+       steal = S390_lowcore.steal_timer;
+       avg_steal = S390_lowcore.avg_steal_timer / 2;
+       if ((s64) steal > 0) {
+               S390_lowcore.steal_timer = 0;
+               account_steal_time(steal);
+               avg_steal += steal;
+       }
+       S390_lowcore.avg_steal_timer = avg_steal;
 }
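
In effect, avg_steal_timer is an exponential moving average with a per-flush decay of 1/2: avg_n = avg_(n-1) / 2 + steal_n. A toy model of the update (illustrative only):

	u64 avg = 0;

	for (unsigned int n = 0; n < intervals; n++)
		avg = avg / 2 + steal[n];	/* older intervals decay geometrically */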
 
 /*
index 958f46da3a7912cfd94a7b517e8e88b88fec3a5a..d91065e81a4e5cffcb2b86463c8dba9190c0d7fa 100644 (file)
@@ -164,10 +164,10 @@ static struct sh_machine_vector __initmv sh_of_generic_mv = {
 
 struct sh_clk_ops;
 
-void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
+void __init __weak arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
 {
 }
 
-void __init plat_irq_setup(void)
+void __init __weak plat_irq_setup(void)
 {
 }
index a6ef3fee5f85714f69e6692e563491960372ab75..7bf2cb680d328462c4e621eae24005f1c9f35afc 100644 (file)
@@ -9,6 +9,7 @@ generic-y += emergency-restart.h
 generic-y += exec.h
 generic-y += irq_regs.h
 generic-y += irq_work.h
+generic-y += kvm_para.h
 generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
index 6e118799831c32dc37b8cf21960d284c5bec3646..8c9d7e5e5dcc02375eeafab25e47878b76239aaa 100644 (file)
@@ -48,51 +48,28 @@ static inline void syscall_set_return_value(struct task_struct *task,
 
 static inline void syscall_get_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
-       /*
-        * Do this simply for now. If we need to start supporting
-        * fetching arguments from arbitrary indices, this will need some
-        * extra logic. Presently there are no in-tree users that depend
-        * on this behaviour.
-        */
-       BUG_ON(i);
 
        /* Argument pattern is: R4, R5, R6, R7, R0, R1 */
-       switch (n) {
-       case 6: args[5] = regs->regs[1];
-       case 5: args[4] = regs->regs[0];
-       case 4: args[3] = regs->regs[7];
-       case 3: args[2] = regs->regs[6];
-       case 2: args[1] = regs->regs[5];
-       case 1: args[0] = regs->regs[4];
-       case 0:
-               break;
-       default:
-               BUG();
-       }
+       args[5] = regs->regs[1];
+       args[4] = regs->regs[0];
+       args[3] = regs->regs[7];
+       args[2] = regs->regs[6];
+       args[1] = regs->regs[5];
+       args[0] = regs->regs[4];
 }
 
 static inline void syscall_set_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         const unsigned long *args)
 {
-       /* Same note as above applies */
-       BUG_ON(i);
-
-       switch (n) {
-       case 6: regs->regs[1] = args[5];
-       case 5: regs->regs[0] = args[4];
-       case 4: regs->regs[7] = args[3];
-       case 3: regs->regs[6] = args[2];
-       case 2: regs->regs[5] = args[1];
-       case 1: regs->regs[4] = args[0];
-               break;
-       default:
-               BUG();
-       }
+       regs->regs[1] = args[5];
+       regs->regs[0] = args[4];
+       regs->regs[7] = args[3];
+       regs->regs[6] = args[2];
+       regs->regs[5] = args[1];
+       regs->regs[4] = args[0];
 }
 
 static inline int syscall_get_arch(void)
index 43882580c7f99bec93e519f1b4182c1daad2fbf0..22fad97da06619a137f6f4cd3e3ca4f6a9bddfcc 100644 (file)
@@ -47,20 +47,16 @@ static inline void syscall_set_return_value(struct task_struct *task,
 
 static inline void syscall_get_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
-       BUG_ON(i + n > 6);
-       memcpy(args, &regs->regs[2 + i], n * sizeof(args[0]));
+       memcpy(args, &regs->regs[2], 6 * sizeof(args[0]));
 }
 
 static inline void syscall_set_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         const unsigned long *args)
 {
-       BUG_ON(i + n > 6);
-       memcpy(&regs->regs[2 + i], args, n * sizeof(args[0]));
+       memcpy(&regs->regs[2], args, 6 * sizeof(args[0]));
 }
 
 static inline int syscall_get_arch(void)
index ecfbd40924dd948f97985e65daa18216c123c424..b8812c74c1dee1ec8dc80bac7ec4f80a4cc454a4 100644 (file)
@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0
 
 generated-y += unistd_32.h
-generic-y += kvm_para.h
 generic-y += ucontext.h
index b82f64e28f55c12fc36f56897bcb5e80f44c9e2d..a22cfd5c0ee8665d96f40dcdfacd2c784a2fad62 100644 (file)
@@ -9,6 +9,7 @@ generic-y += exec.h
 generic-y += export.h
 generic-y += irq_regs.h
 generic-y += irq_work.h
+generic-y += kvm_para.h
 generic-y += linkage.h
 generic-y += local.h
 generic-y += local64.h
index 053989e3f6a6f1435323873ea010723ac09736bd..4d075434e8164c18e140249d65cbffdb28290dc6 100644 (file)
@@ -96,11 +96,11 @@ static inline void syscall_set_return_value(struct task_struct *task,
 
 static inline void syscall_get_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
        int zero_extend = 0;
        unsigned int j;
+       unsigned int n = 6;
 
 #ifdef CONFIG_SPARC64
        if (test_tsk_thread_flag(task, TIF_32BIT))
@@ -108,7 +108,7 @@ static inline void syscall_get_arguments(struct task_struct *task,
 #endif
 
        for (j = 0; j < n; j++) {
-               unsigned long val = regs->u_regs[UREG_I0 + i + j];
+               unsigned long val = regs->u_regs[UREG_I0 + j];
 
                if (zero_extend)
                        args[j] = (u32) val;
@@ -119,13 +119,12 @@ static inline void syscall_get_arguments(struct task_struct *task,
 
 static inline void syscall_set_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         const unsigned long *args)
 {
-       unsigned int j;
+       unsigned int i;
 
-       for (j = 0; j < n; j++)
-               regs->u_regs[UREG_I0 + i + j] = args[j];
+       for (i = 0; i < 6; i++)
+               regs->u_regs[UREG_I0 + i] = args[i];
 }
 
 static inline int syscall_get_arch(void)
diff --git a/arch/sparc/include/uapi/asm/kvm_para.h b/arch/sparc/include/uapi/asm/kvm_para.h
deleted file mode 100644 (file)
index baacc49..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#include <asm-generic/kvm_para.h>
index a8af6023c1263f7b43a4a52f52089235493923bd..14b93c5564e3572c07993c74217fb0b89ee36573 100644 (file)
@@ -73,6 +73,11 @@ static inline void iommu_batch_start(struct device *dev, unsigned long prot, uns
        p->npages       = 0;
 }
 
+static inline bool iommu_use_atu(struct iommu *iommu, u64 mask)
+{
+       return iommu->atu && mask > DMA_BIT_MASK(32);
+}
+
 /* Interrupts must be disabled.  */
 static long iommu_batch_flush(struct iommu_batch *p, u64 mask)
 {
@@ -92,7 +97,7 @@ static long iommu_batch_flush(struct iommu_batch *p, u64 mask)
                prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE);
 
        while (npages != 0) {
-               if (mask <= DMA_BIT_MASK(32) || !pbm->iommu->atu) {
+               if (!iommu_use_atu(pbm->iommu, mask)) {
                        num = pci_sun4v_iommu_map(devhandle,
                                                  HV_PCI_TSBID(0, entry),
                                                  npages,
@@ -179,7 +184,6 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
        unsigned long flags, order, first_page, npages, n;
        unsigned long prot = 0;
        struct iommu *iommu;
-       struct atu *atu;
        struct iommu_map_table *tbl;
        struct page *page;
        void *ret;
@@ -205,13 +209,11 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
        memset((char *)first_page, 0, PAGE_SIZE << order);
 
        iommu = dev->archdata.iommu;
-       atu = iommu->atu;
-
        mask = dev->coherent_dma_mask;
-       if (mask <= DMA_BIT_MASK(32) || !atu)
+       if (!iommu_use_atu(iommu, mask))
                tbl = &iommu->tbl;
        else
-               tbl = &atu->tbl;
+               tbl = &iommu->atu->tbl;
 
        entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
                                      (unsigned long)(-1), 0);
@@ -333,7 +335,7 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
        atu = iommu->atu;
        devhandle = pbm->devhandle;
 
-       if (dvma <= DMA_BIT_MASK(32)) {
+       if (!iommu_use_atu(iommu, dvma)) {
                tbl = &iommu->tbl;
                iotsb_num = 0; /* we don't care for legacy iommu */
        } else {
@@ -374,7 +376,7 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
        npages >>= IO_PAGE_SHIFT;
 
        mask = *dev->dma_mask;
-       if (mask <= DMA_BIT_MASK(32))
+       if (!iommu_use_atu(iommu, mask))
                tbl = &iommu->tbl;
        else
                tbl = &atu->tbl;
@@ -510,7 +512,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
                                  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
 
        mask = *dev->dma_mask;
-       if (mask <= DMA_BIT_MASK(32))
+       if (!iommu_use_atu(iommu, mask))
                tbl = &iommu->tbl;
        else
                tbl = &atu->tbl;
index 9fb9cf8cd39a3b29f45a80d1a4281d2abd82a262..98e50c50c12efb65ee100eecff49ea6a54e74853 100644 (file)
@@ -53,84 +53,30 @@ static inline void syscall_set_return_value(struct task_struct *task,
 
 static inline void syscall_get_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
        const struct uml_pt_regs *r = &regs->regs;
 
-       switch (i) {
-       case 0:
-               if (!n--)
-                       break;
-               *args++ = UPT_SYSCALL_ARG1(r);
-       case 1:
-               if (!n--)
-                       break;
-               *args++ = UPT_SYSCALL_ARG2(r);
-       case 2:
-               if (!n--)
-                       break;
-               *args++ = UPT_SYSCALL_ARG3(r);
-       case 3:
-               if (!n--)
-                       break;
-               *args++ = UPT_SYSCALL_ARG4(r);
-       case 4:
-               if (!n--)
-                       break;
-               *args++ = UPT_SYSCALL_ARG5(r);
-       case 5:
-               if (!n--)
-                       break;
-               *args++ = UPT_SYSCALL_ARG6(r);
-       case 6:
-               if (!n--)
-                       break;
-       default:
-               BUG();
-               break;
-       }
+       *args++ = UPT_SYSCALL_ARG1(r);
+       *args++ = UPT_SYSCALL_ARG2(r);
+       *args++ = UPT_SYSCALL_ARG3(r);
+       *args++ = UPT_SYSCALL_ARG4(r);
+       *args++ = UPT_SYSCALL_ARG5(r);
+       *args   = UPT_SYSCALL_ARG6(r);
 }
 
 static inline void syscall_set_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         const unsigned long *args)
 {
        struct uml_pt_regs *r = &regs->regs;
 
-       switch (i) {
-       case 0:
-               if (!n--)
-                       break;
-               UPT_SYSCALL_ARG1(r) = *args++;
-       case 1:
-               if (!n--)
-                       break;
-               UPT_SYSCALL_ARG2(r) = *args++;
-       case 2:
-               if (!n--)
-                       break;
-               UPT_SYSCALL_ARG3(r) = *args++;
-       case 3:
-               if (!n--)
-                       break;
-               UPT_SYSCALL_ARG4(r) = *args++;
-       case 4:
-               if (!n--)
-                       break;
-               UPT_SYSCALL_ARG5(r) = *args++;
-       case 5:
-               if (!n--)
-                       break;
-               UPT_SYSCALL_ARG6(r) = *args++;
-       case 6:
-               if (!n--)
-                       break;
-       default:
-               BUG();
-               break;
-       }
+       UPT_SYSCALL_ARG1(r) = *args++;
+       UPT_SYSCALL_ARG2(r) = *args++;
+       UPT_SYSCALL_ARG3(r) = *args++;
+       UPT_SYSCALL_ARG4(r) = *args++;
+       UPT_SYSCALL_ARG5(r) = *args++;
+       UPT_SYSCALL_ARG6(r) = *args;
 }
 
 /* See arch/x86/um/asm/syscall.h for syscall_get_arch() definition. */
index 1d1544b6ca74ce96e765c68cfa2f1a86a7ef7ac8..d77d953c04c1cfbe039bf207fa4db8b362e65f22 100644 (file)
@@ -18,6 +18,7 @@ generic-y += irq_work.h
 generic-y += kdebug.h
 generic-y += kmap_types.h
 generic-y += kprobes.h
+generic-y += kvm_para.h
 generic-y += local.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
index 755bb11323d8feb92d12349bf444f151cd8acc29..1c72f04ff75da1a7f6918f00b14116a183a79313 100644 (file)
@@ -1,2 +1 @@
-generic-y += kvm_para.h
 generic-y += ucontext.h
index c1f9b3cf437c3aa9018ff3288282e575f870424c..5ad92419be19c5d67fae37158bac2b93e105d1bc 100644 (file)
@@ -2217,14 +2217,8 @@ config RANDOMIZE_MEMORY_PHYSICAL_PADDING
           If unsure, leave at the default value.
 
 config HOTPLUG_CPU
-       bool "Support for hot-pluggable CPUs"
+       def_bool y
        depends on SMP
-       ---help---
-         Say Y here to allow turning CPUs off and on. CPUs can be
-         controlled through /sys/devices/system/cpu.
-         ( Note: power management support will enable this option
-           automatically on SMP systems. )
-         Say N if you want to disable CPU hotplug.
 
 config BOOTPARAM_HOTPLUG_CPU0
        bool "Set default setting of cpu0_hotpluggable"
index 2d8b9d8ca4f8753291bb1487fb0b77d7b6009280..a587805c6687f6721ae8140da8144701c9abb49b 100644 (file)
@@ -219,8 +219,12 @@ ifdef CONFIG_RETPOLINE
   # Additionally, avoid generating expensive indirect jumps which
   # are subject to retpolines for small number of switch cases.
   # clang turns off jump table generation by default when under
-  # retpoline builds, however, gcc does not for x86.
-  KBUILD_CFLAGS += $(call cc-option,--param=case-values-threshold=20)
+  # retpoline builds, however, gcc does not for x86. This was
+  # only fixed in gcc 8.4.0 and later stable releases; older
+  # versions still generate jump tables. See gcc bug #86952.
+  ifndef CONFIG_CC_IS_CLANG
+    KBUILD_CFLAGS += $(call cc-option,-fno-jump-tables)
+  endif
 endif
 
 archscripts: scripts_basic
index fd13655e0f9b016baba58a3ed41832284d866fe3..d2f184165934c95a403faf489ec3b652850e74ff 100644 (file)
@@ -120,8 +120,6 @@ static inline void console_init(void)
 
 void set_sev_encryption_mask(void);
 
-#endif
-
 /* acpi.c */
 #ifdef CONFIG_ACPI
 acpi_physical_address get_rsdp_addr(void);
@@ -135,3 +133,5 @@ int count_immovable_mem_regions(void);
 #else
 static inline int count_immovable_mem_regions(void) { return 0; }
 #endif
+
+#endif /* BOOT_COMPRESSED_MISC_H */
index 7d2d7c801dba6abb226b630104d1f038242562cf..0ecfac84ba9111306faeb900375b6402d52afa07 100644 (file)
@@ -3,10 +3,14 @@
 #include <linux/types.h>
 #include <linux/init.h>
 #include <linux/slab.h>
+#include <linux/delay.h>
 #include <asm/apicdef.h>
+#include <asm/nmi.h>
 
 #include "../perf_event.h"
 
+static DEFINE_PER_CPU(unsigned int, perf_nmi_counter);
+
 static __initconst const u64 amd_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
@@ -429,6 +433,132 @@ static void amd_pmu_cpu_dead(int cpu)
        }
 }
 
+/*
+ * When a PMC counter overflows, an NMI is used to process the event and
+ * reset the counter. NMI latency can mean the counter is updated before
+ * the NMI can run, producing what appear to be spurious NMIs. This
+ * function waits for the NMI to run and reset the counter, to avoid
+ * possible unhandled NMI messages.
+ */
+#define OVERFLOW_WAIT_COUNT    50
+
+static void amd_pmu_wait_on_overflow(int idx)
+{
+       unsigned int i;
+       u64 counter;
+
+       /*
+        * Wait for the counter to be reset if it has overflowed. This loop
+        * should exit very, very quickly, but just in case, don't wait
+        * forever...
+        */
+       for (i = 0; i < OVERFLOW_WAIT_COUNT; i++) {
+               rdmsrl(x86_pmu_event_addr(idx), counter);
+               if (counter & (1ULL << (x86_pmu.cntval_bits - 1)))
+                       break;
+
+               /* Might be in IRQ context, so can't sleep */
+               udelay(1);
+       }
+}
+
+static void amd_pmu_disable_all(void)
+{
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+       int idx;
+
+       x86_pmu_disable_all();
+
+       /*
+        * This shouldn't be called from NMI context, but add a safeguard here
+        * to return, since if we're in NMI context we can't wait for an NMI
+        * to reset an overflowed counter value.
+        */
+       if (in_nmi())
+               return;
+
+       /*
+        * Check each counter for overflow and wait for it to be reset by the
+        * NMI if it has overflowed. This relies on the fact that all active
+        * counters are always enabled when this function is called and
+        * ARCH_PERFMON_EVENTSEL_INT is always set.
+        */
+       for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+               if (!test_bit(idx, cpuc->active_mask))
+                       continue;
+
+               amd_pmu_wait_on_overflow(idx);
+       }
+}
+
+static void amd_pmu_disable_event(struct perf_event *event)
+{
+       x86_pmu_disable_event(event);
+
+       /*
+        * This can be called from NMI context (via x86_pmu_stop). The counter
+        * may have overflowed, but either way, we'll never see it get reset
+        * by the NMI if we're already in the NMI. And the NMI latency support
+        * below will take care of any pending NMI that might have been
+        * generated by the overflow.
+        */
+       if (in_nmi())
+               return;
+
+       amd_pmu_wait_on_overflow(event->hw.idx);
+}
+
+/*
+ * Because of NMI latency, if multiple PMC counters are active or other sources
+ * of NMIs are received, the perf NMI handler can handle one or more overflowed
+ * PMC counters outside of the NMI associated with the PMC overflow. If the NMI
+ * doesn't arrive at the LAPIC in time to become a pending NMI, then the kernel
+ * back-to-back NMI support won't be active. This PMC handler needs to take into
+ * account that this can occur, otherwise this could result in unknown NMI
+ * messages being issued. Examples of this are a PMC overflow while in the
+ * NMI handler when multiple PMCs are active, or a PMC overflow while
+ * handling some other source of an NMI.
+ *
+ * Attempt to mitigate this by using the number of active PMCs to determine
+ * whether to return NMI_HANDLED if the perf NMI handler did not handle/reset
+ * any PMCs. The per-CPU perf_nmi_counter variable is set to the minimum of
+ * the number of active PMCs and 2. The value 2 is used in case an NMI does not
+ * arrive at the LAPIC in time to be collapsed into an already pending NMI.
+ */
+static int amd_pmu_handle_irq(struct pt_regs *regs)
+{
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+       int active, handled;
+
+       /*
+        * Obtain the active count before calling x86_pmu_handle_irq() since
+        * it is possible that x86_pmu_handle_irq() may make a counter
+        * inactive (through x86_pmu_stop).
+        */
+       active = __bitmap_weight(cpuc->active_mask, X86_PMC_IDX_MAX);
+
+       /* Process any counter overflows */
+       handled = x86_pmu_handle_irq(regs);
+
+       /*
+        * If a counter was handled, record the number of possible remaining
+        * NMIs that can occur.
+        */
+       if (handled) {
+               this_cpu_write(perf_nmi_counter,
+                              min_t(unsigned int, 2, active));
+
+               return handled;
+       }
+
+       if (!this_cpu_read(perf_nmi_counter))
+               return NMI_DONE;
+
+       this_cpu_dec(perf_nmi_counter);
+
+       return NMI_HANDLED;
+}
+
 static struct event_constraint *
 amd_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
                          struct perf_event *event)
@@ -621,11 +751,11 @@ static ssize_t amd_event_sysfs_show(char *page, u64 config)
 
 static __initconst const struct x86_pmu amd_pmu = {
        .name                   = "AMD",
-       .handle_irq             = x86_pmu_handle_irq,
-       .disable_all            = x86_pmu_disable_all,
+       .handle_irq             = amd_pmu_handle_irq,
+       .disable_all            = amd_pmu_disable_all,
        .enable_all             = x86_pmu_enable_all,
        .enable                 = x86_pmu_enable_event,
-       .disable                = x86_pmu_disable_event,
+       .disable                = amd_pmu_disable_event,
        .hw_config              = amd_pmu_hw_config,
        .schedule_events        = x86_schedule_events,
        .eventsel               = MSR_K7_EVNTSEL0,
@@ -732,7 +862,7 @@ void amd_pmu_enable_virt(void)
        cpuc->perf_ctr_virt_mask = 0;
 
        /* Reload all events */
-       x86_pmu_disable_all();
+       amd_pmu_disable_all();
        x86_pmu_enable_all(0);
 }
 EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);
@@ -750,7 +880,7 @@ void amd_pmu_disable_virt(void)
        cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
 
        /* Reload all events */
-       x86_pmu_disable_all();
+       amd_pmu_disable_all();
        x86_pmu_enable_all(0);
 }
 EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);
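
A toy model of the claim logic in amd_pmu_handle_irq() (sketch only, with a plain static standing in for the per-CPU perf_nmi_counter): after at least one handled overflow, up to min(active, 2) subsequent overflow-less NMIs are absorbed as latency artifacts instead of surfacing as unknown-NMI messages.

	static int nmi_budget;	/* stands in for the per-CPU perf_nmi_counter */

	static int claim_nmi(int handled, int active)
	{
		if (handled) {
			nmi_budget = active < 2 ? active : 2;
			return handled;		/* real overflow(s) processed */
		}
		if (nmi_budget > 0) {
			nmi_budget--;
			return 1;		/* absorbed as a late PMC NMI */
		}
		return 0;			/* genuinely not ours */
	}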
index e2b1447192a888ffafb2883ddbdfbbd37c1e9315..81911e11a15dfcd7cff5694d0a2a83df769a655b 100644 (file)
@@ -1349,8 +1349,9 @@ void x86_pmu_stop(struct perf_event *event, int flags)
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
 
-       if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
+       if (test_bit(hwc->idx, cpuc->active_mask)) {
                x86_pmu.disable(event);
+               __clear_bit(hwc->idx, cpuc->active_mask);
                cpuc->events[hwc->idx] = NULL;
                WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
                hwc->state |= PERF_HES_STOPPED;
@@ -1447,16 +1448,8 @@ int x86_pmu_handle_irq(struct pt_regs *regs)
        apic_write(APIC_LVTPC, APIC_DM_NMI);
 
        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
-               if (!test_bit(idx, cpuc->active_mask)) {
-                       /*
-                        * Though we deactivated the counter some cpus
-                        * might still deliver spurious interrupts still
-                        * in flight. Catch them:
-                        */
-                       if (__test_and_clear_bit(idx, cpuc->running))
-                               handled++;
+               if (!test_bit(idx, cpuc->active_mask))
                        continue;
-               }
 
                event = cpuc->events[idx];
 
index 8baa441d8000f6c4efbde5afe72d4e5a518d2184..f61dcbef20ffee301a5904717846fe1d1c1c6f31 100644 (file)
@@ -3185,7 +3185,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
                return ret;
 
        if (event->attr.precise_ip) {
-               if (!event->attr.freq) {
+               if (!(event->attr.freq || event->attr.wakeup_events)) {
                        event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
                        if (!(event->attr.sample_type &
                              ~intel_pmu_large_pebs_flags(event)))
@@ -3575,6 +3575,12 @@ static void intel_pmu_cpu_starting(int cpu)
 
        cpuc->lbr_sel = NULL;
 
+       if (x86_pmu.flags & PMU_FL_TFA) {
+               WARN_ON_ONCE(cpuc->tfa_shadow);
+               cpuc->tfa_shadow = ~0ULL;
+               intel_set_tfa(cpuc, false);
+       }
+
        if (x86_pmu.version > 1)
                flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
 
index d153d570bb04755d9fb106e3375db55dd3114fd7..8e790ec219a5fd5be0e812736ff7be167a5cd20e 100644 (file)
  * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
  */
 
-#define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
+#define RLONG_ADDR(x)                   "m" (*(volatile long *) (x))
+#define WBYTE_ADDR(x)                  "+m" (*(volatile char *) (x))
 
-#define ADDR                           BITOP_ADDR(addr)
+#define ADDR                           RLONG_ADDR(addr)
 
 /*
  * We do the locked ops that don't return the old value as
  * a mask operation on a byte.
  */
 #define IS_IMMEDIATE(nr)               (__builtin_constant_p(nr))
-#define CONST_MASK_ADDR(nr, addr)      BITOP_ADDR((void *)(addr) + ((nr)>>3))
+#define CONST_MASK_ADDR(nr, addr)      WBYTE_ADDR((void *)(addr) + ((nr)>>3))
 #define CONST_MASK(nr)                 (1 << ((nr) & 7))
 
 /**
@@ -73,7 +74,7 @@ set_bit(long nr, volatile unsigned long *addr)
                        : "memory");
        } else {
                asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0"
-                       : BITOP_ADDR(addr) : "Ir" (nr) : "memory");
+                       : : RLONG_ADDR(addr), "Ir" (nr) : "memory");
        }
 }
 
@@ -88,7 +89,7 @@ set_bit(long nr, volatile unsigned long *addr)
  */
 static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
 {
-       asm volatile(__ASM_SIZE(bts) " %1,%0" : ADDR : "Ir" (nr) : "memory");
+       asm volatile(__ASM_SIZE(bts) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
 }
 
 /**
@@ -110,8 +111,7 @@ clear_bit(long nr, volatile unsigned long *addr)
                        : "iq" ((u8)~CONST_MASK(nr)));
        } else {
                asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0"
-                       : BITOP_ADDR(addr)
-                       : "Ir" (nr));
+                       : : RLONG_ADDR(addr), "Ir" (nr) : "memory");
        }
 }
 
@@ -131,7 +131,7 @@ static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *ad
 
 static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
 {
-       asm volatile(__ASM_SIZE(btr) " %1,%0" : ADDR : "Ir" (nr));
+       asm volatile(__ASM_SIZE(btr) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
 }
 
 static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
@@ -139,7 +139,7 @@ static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile
        bool negative;
        asm volatile(LOCK_PREFIX "andb %2,%1"
                CC_SET(s)
-               : CC_OUT(s) (negative), ADDR
+               : CC_OUT(s) (negative), WBYTE_ADDR(addr)
                : "ir" ((char) ~(1 << nr)) : "memory");
        return negative;
 }
@@ -155,13 +155,9 @@ static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile
  * __clear_bit() is non-atomic and implies release semantics before the memory
  * operation. It can be used for an unlock if no other CPUs can concurrently
  * modify other bits in the word.
- *
- * No memory barrier is required here, because x86 cannot reorder stores past
- * older loads. Same principle as spin_unlock.
  */
 static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
 {
-       barrier();
        __clear_bit(nr, addr);
 }
 
@@ -176,7 +172,7 @@ static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *
  */
 static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
 {
-       asm volatile(__ASM_SIZE(btc) " %1,%0" : ADDR : "Ir" (nr));
+       asm volatile(__ASM_SIZE(btc) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
 }
 
 /**
@@ -196,8 +192,7 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr)
                        : "iq" ((u8)CONST_MASK(nr)));
        } else {
                asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0"
-                       : BITOP_ADDR(addr)
-                       : "Ir" (nr));
+                       : : RLONG_ADDR(addr), "Ir" (nr) : "memory");
        }
 }
 
@@ -242,8 +237,8 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *
 
        asm(__ASM_SIZE(bts) " %2,%1"
            CC_SET(c)
-           : CC_OUT(c) (oldbit), ADDR
-           : "Ir" (nr));
+           : CC_OUT(c) (oldbit)
+           : ADDR, "Ir" (nr) : "memory");
        return oldbit;
 }
 
@@ -282,8 +277,8 @@ static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long
 
        asm volatile(__ASM_SIZE(btr) " %2,%1"
                     CC_SET(c)
-                    : CC_OUT(c) (oldbit), ADDR
-                    : "Ir" (nr));
+                    : CC_OUT(c) (oldbit)
+                    : ADDR, "Ir" (nr) : "memory");
        return oldbit;
 }
 
@@ -294,8 +289,8 @@ static __always_inline bool __test_and_change_bit(long nr, volatile unsigned lon
 
        asm volatile(__ASM_SIZE(btc) " %2,%1"
                     CC_SET(c)
-                    : CC_OUT(c) (oldbit), ADDR
-                    : "Ir" (nr) : "memory");
+                    : CC_OUT(c) (oldbit)
+                    : ADDR, "Ir" (nr) : "memory");
 
        return oldbit;
 }
@@ -326,7 +321,7 @@ static __always_inline bool variable_test_bit(long nr, volatile const unsigned l
        asm volatile(__ASM_SIZE(bt) " %2,%1"
                     CC_SET(c)
                     : CC_OUT(c) (oldbit)
-                    : "m" (*(unsigned long *)addr), "Ir" (nr));
+                    : "m" (*(unsigned long *)addr), "Ir" (nr) : "memory");
 
        return oldbit;
 }
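
A hedged reading of the constraint change running through this file: the bit instructions can touch memory beyond the single long named in their operand, so modelling that long as a read-write output ("+m") tells the compiler too little (neighbouring words may change) while constraining it too much. A plain "m" input plus a "memory" clobber is both cheaper and honest about the side effects. A minimal illustration of the resulting pattern (x86-64 form, not the kernel's exact macros):

	static inline void sketch_set_bit(long nr, volatile unsigned long *addr)
	{
		asm volatile("btsq %1,%0"
			     : /* no outputs */
			     : "m" (*(volatile long *)addr), "Ir" (nr)
			     : "memory");	/* may write words beyond *addr */
	}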
index ce95b8cbd2296b1e33de2e0f520a00f3981e3f23..0e56ff7e484857a1fdd8673fdfa2e0b784e23cea 100644 (file)
@@ -112,8 +112,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
         test_cpu_cap(c, bit))
 
 #define this_cpu_has(bit)                                              \
-       (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 :  \
-        x86_this_cpu_test_bit(bit, (unsigned long *)&cpu_info.x86_capability))
+       (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 :  \
+        x86_this_cpu_test_bit(bit,                                     \
+               (unsigned long __percpu *)&cpu_info.x86_capability))
 
 /*
  * This macro is for detection of features which need kernel
index 93c4bf598fb06c7e53865141dd3e7faa514194ff..feab24cac610e25f276d3d1f71f4705c23106b00 100644 (file)
@@ -226,7 +226,9 @@ struct x86_emulate_ops {
 
        unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt);
        void (*set_hflags)(struct x86_emulate_ctxt *ctxt, unsigned hflags);
-       int (*pre_leave_smm)(struct x86_emulate_ctxt *ctxt, u64 smbase);
+       int (*pre_leave_smm)(struct x86_emulate_ctxt *ctxt,
+                            const char *smstate);
+       void (*post_leave_smm)(struct x86_emulate_ctxt *ctxt);
 
 };
 
index a5db4475e72db63031284ecb986c445d7c835eb8..a9d03af340307db6589376cf3bfb29a533910cdd 100644 (file)
@@ -126,7 +126,7 @@ static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
 }
 
 #define KVM_PERMILLE_MMU_PAGES 20
-#define KVM_MIN_ALLOC_MMU_PAGES 64
+#define KVM_MIN_ALLOC_MMU_PAGES 64UL
 #define KVM_MMU_HASH_SHIFT 12
 #define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
 #define KVM_MIN_FREE_MMU_PAGES 5
@@ -253,14 +253,14 @@ struct kvm_mmu_memory_cache {
  * kvm_memory_slot.arch.gfn_track which is 16 bits, so the role bits used
  * by indirect shadow page can not be more than 15 bits.
  *
- * Currently, we used 14 bits that are @level, @cr4_pae, @quadrant, @access,
+ * Currently, we use 14 bits that are @level, @gpte_is_8_bytes, @quadrant, @access,
  * @nxe, @cr0_wp, @smep_andnot_wp and @smap_andnot_wp.
  */
 union kvm_mmu_page_role {
        u32 word;
        struct {
                unsigned level:4;
-               unsigned cr4_pae:1;
+               unsigned gpte_is_8_bytes:1;
                unsigned quadrant:2;
                unsigned direct:1;
                unsigned access:3;
@@ -350,6 +350,7 @@ struct kvm_mmu_page {
 };
 
 struct kvm_pio_request {
+       unsigned long linear_rip;
        unsigned long count;
        int in;
        int port;
@@ -568,6 +569,7 @@ struct kvm_vcpu_arch {
        bool tpr_access_reporting;
        u64 ia32_xss;
        u64 microcode_version;
+       u64 arch_capabilities;
 
        /*
         * Paging state of the vcpu
@@ -842,9 +844,9 @@ enum kvm_irqchip_mode {
 };
 
 struct kvm_arch {
-       unsigned int n_used_mmu_pages;
-       unsigned int n_requested_mmu_pages;
-       unsigned int n_max_mmu_pages;
+       unsigned long n_used_mmu_pages;
+       unsigned long n_requested_mmu_pages;
+       unsigned long n_max_mmu_pages;
        unsigned int indirect_shadow_pages;
        struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
        /*
@@ -1180,7 +1182,7 @@ struct kvm_x86_ops {
 
        int (*smi_allowed)(struct kvm_vcpu *vcpu);
        int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
-       int (*pre_leave_smm)(struct kvm_vcpu *vcpu, u64 smbase);
+       int (*pre_leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
        int (*enable_smi_window)(struct kvm_vcpu *vcpu);
 
        int (*mem_enc_op)(struct kvm *kvm, void __user *argp);
@@ -1192,6 +1194,8 @@ struct kvm_x86_ops {
        int (*nested_enable_evmcs)(struct kvm_vcpu *vcpu,
                                   uint16_t *vmcs_version);
        uint16_t (*nested_get_evmcs_version)(struct kvm_vcpu *vcpu);
+
+       bool (*need_emulation_on_page_fault)(struct kvm_vcpu *vcpu);
 };
 
 struct kvm_arch_async_pf {
@@ -1252,8 +1256,8 @@ void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
                                   gfn_t gfn_offset, unsigned long mask);
 void kvm_mmu_zap_all(struct kvm *kvm);
 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
-unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
-void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
+unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm);
+void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long kvm_nr_mmu_pages);
 
 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
 bool pdptrs_changed(struct kvm_vcpu *vcpu);
@@ -1588,4 +1592,7 @@ static inline int kvm_cpu_get_apicid(int mps_cpu)
 #define put_smstate(type, buf, offset, val)                      \
        *(type *)((buf) + (offset) - 0x7e00) = val
 
+#define GET_SMSTATE(type, buf, offset)         \
+       (*(type *)((buf) + (offset) - 0x7e00))
+
 #endif /* _ASM_X86_KVM_HOST_H */
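
The new GET_SMSTATE is the read-side twin of the existing put_smstate(): both rebase an architectural SMRAM state-save offset (0x7e00..0x7fff) onto a local buffer holding the 512 bytes read from SMBASE + 0xfe00. A short illustrative sketch; buf and the stored value are hypothetical, the offsets are ones used elsewhere in this diff:

        char buf[512];  /* snapshot of state-save bytes 0x7e00..0x7fff */
        u64 cr0;

        /* 0x7f58 - 0x7e00 == 0x158, so this reads the u64 at buf + 0x158 */
        cr0 = GET_SMSTATE(u64, buf, 0x7f58);
        /* the store side goes through the same rebasing */
        put_smstate(u32, buf, 0x7ef8, 0xfffff000);
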
index 63b3393bd98ea2caaa67aefeeab359cbac72dc51..c53682303c9c1252a79d90cf8dd19a96d126db93 100644 (file)
@@ -77,7 +77,11 @@ static inline size_t real_mode_size_needed(void)
        return ALIGN(real_mode_blob_end - real_mode_blob, PAGE_SIZE);
 }
 
-void set_real_mode_mem(phys_addr_t mem, size_t size);
+static inline void set_real_mode_mem(phys_addr_t mem)
+{
+       real_mode_header = (struct real_mode_header *) __va(mem);
+}
+
 void reserve_real_mode(void);
 
 #endif /* __ASSEMBLY__ */
index d653139857af2a1121f877b611c8d56d4e4690f0..4c305471ec3312e3b0adc7063c30e9d3edf2de7f 100644 (file)
@@ -91,11 +91,9 @@ static inline void syscall_set_return_value(struct task_struct *task,
 
 static inline void syscall_get_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
-       BUG_ON(i + n > 6);
-       memcpy(args, &regs->bx + i, n * sizeof(args[0]));
+       memcpy(args, &regs->bx, 6 * sizeof(args[0]));
 }
 
 static inline void syscall_set_arguments(struct task_struct *task,
@@ -116,124 +114,50 @@ static inline int syscall_get_arch(void)
 
 static inline void syscall_get_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
 # ifdef CONFIG_IA32_EMULATION
-       if (task->thread_info.status & TS_COMPAT)
-               switch (i) {
-               case 0:
-                       if (!n--) break;
-                       *args++ = regs->bx;
-               case 1:
-                       if (!n--) break;
-                       *args++ = regs->cx;
-               case 2:
-                       if (!n--) break;
-                       *args++ = regs->dx;
-               case 3:
-                       if (!n--) break;
-                       *args++ = regs->si;
-               case 4:
-                       if (!n--) break;
-                       *args++ = regs->di;
-               case 5:
-                       if (!n--) break;
-                       *args++ = regs->bp;
-               case 6:
-                       if (!n--) break;
-               default:
-                       BUG();
-                       break;
-               }
-       else
+       if (task->thread_info.status & TS_COMPAT) {
+               *args++ = regs->bx;
+               *args++ = regs->cx;
+               *args++ = regs->dx;
+               *args++ = regs->si;
+               *args++ = regs->di;
+               *args   = regs->bp;
+       } else
 # endif
-               switch (i) {
-               case 0:
-                       if (!n--) break;
-                       *args++ = regs->di;
-               case 1:
-                       if (!n--) break;
-                       *args++ = regs->si;
-               case 2:
-                       if (!n--) break;
-                       *args++ = regs->dx;
-               case 3:
-                       if (!n--) break;
-                       *args++ = regs->r10;
-               case 4:
-                       if (!n--) break;
-                       *args++ = regs->r8;
-               case 5:
-                       if (!n--) break;
-                       *args++ = regs->r9;
-               case 6:
-                       if (!n--) break;
-               default:
-                       BUG();
-                       break;
-               }
+       {
+               *args++ = regs->di;
+               *args++ = regs->si;
+               *args++ = regs->dx;
+               *args++ = regs->r10;
+               *args++ = regs->r8;
+               *args   = regs->r9;
+       }
 }
 
 static inline void syscall_set_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         const unsigned long *args)
 {
 # ifdef CONFIG_IA32_EMULATION
-       if (task->thread_info.status & TS_COMPAT)
-               switch (i) {
-               case 0:
-                       if (!n--) break;
-                       regs->bx = *args++;
-               case 1:
-                       if (!n--) break;
-                       regs->cx = *args++;
-               case 2:
-                       if (!n--) break;
-                       regs->dx = *args++;
-               case 3:
-                       if (!n--) break;
-                       regs->si = *args++;
-               case 4:
-                       if (!n--) break;
-                       regs->di = *args++;
-               case 5:
-                       if (!n--) break;
-                       regs->bp = *args++;
-               case 6:
-                       if (!n--) break;
-               default:
-                       BUG();
-                       break;
-               }
-       else
+       if (task->thread_info.status & TS_COMPAT) {
+               regs->bx = *args++;
+               regs->cx = *args++;
+               regs->dx = *args++;
+               regs->si = *args++;
+               regs->di = *args++;
+               regs->bp = *args;
+       } else
 # endif
-               switch (i) {
-               case 0:
-                       if (!n--) break;
-                       regs->di = *args++;
-               case 1:
-                       if (!n--) break;
-                       regs->si = *args++;
-               case 2:
-                       if (!n--) break;
-                       regs->dx = *args++;
-               case 3:
-                       if (!n--) break;
-                       regs->r10 = *args++;
-               case 4:
-                       if (!n--) break;
-                       regs->r8 = *args++;
-               case 5:
-                       if (!n--) break;
-                       regs->r9 = *args++;
-               case 6:
-                       if (!n--) break;
-               default:
-                       BUG();
-                       break;
-               }
+       {
+               regs->di = *args++;
+               regs->si = *args++;
+               regs->dx = *args++;
+               regs->r10 = *args++;
+               regs->r8 = *args++;
+               regs->r9 = *args;
+       }
 }
 
 static inline int syscall_get_arch(void)
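
With the i/n window removed, the helper's contract is simply "copy all six arguments": callers pass a six-entry array and can no longer ask for a sub-range. A hypothetical caller, sketching the new convention:

        unsigned long args[6];

        /* fills args[0..5] from di, si, dx, r10, r8, r9 (64-bit ABI) */
        syscall_get_arguments(task, regs, args);
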
index de6f0d59a24f418febf72e40dd595e41dcb3c7c0..2863c2026655815c2237a939d66e390e3a7623bf 100644 (file)
@@ -206,6 +206,9 @@ xen_single_call(unsigned int call,
        __HYPERCALL_DECLS;
        __HYPERCALL_5ARG(a1, a2, a3, a4, a5);
 
+       if (call >= PAGE_SIZE / sizeof(hypercall_page[0]))
+               return -EINVAL;
+
        asm volatile(CALL_NOSPEC
                     : __HYPERCALL_5PARAM
                     : [thunk_target] "a" (&hypercall_page[call])
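
The added check rejects an out-of-range hypercall number before it becomes an indirect branch target via CALL_NOSPEC, so a bogus index yields -EINVAL rather than a jump past the end of the hypercall page. It is the standard validate-before-index idiom; a generic sketch with hypothetical names (handlers, dispatch, NR_HANDLERS):

        static int (*handlers[NR_HANDLERS])(void);

        static int dispatch(unsigned int idx)
        {
                if (idx >= ARRAY_SIZE(handlers))  /* bound before indexing */
                        return -EINVAL;
                return handlers[idx]();
        }
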
index f0b0c90dd398246eb2882050d69c6b53ccca11af..d213ec5c3766db0dd5176c951b13e5f3c1514cfb 100644 (file)
 
 #define VMX_ABORT_SAVE_GUEST_MSR_FAIL        1
 #define VMX_ABORT_LOAD_HOST_PDPTE_FAIL       2
+#define VMX_ABORT_VMCS_CORRUPTED             3
 #define VMX_ABORT_LOAD_HOST_MSR_FAIL         4
 
 #endif /* _UAPIVMX_H */
index f33f11f69078e7f4497e48ae771466b74a4405a2..1573a0a6b52530f1759429bbee15476e29e31126 100644 (file)
@@ -501,11 +501,8 @@ void cqm_handle_limbo(struct work_struct *work)
 void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms)
 {
        unsigned long delay = msecs_to_jiffies(delay_ms);
-       struct rdt_resource *r;
        int cpu;
 
-       r = &rdt_resources_all[RDT_RESOURCE_L3];
-
        cpu = cpumask_any(&dom->cpu_mask);
        dom->cqm_work_cpu = cpu;
 
index 399601eda8e43c2cf8a855b44b4dc811a247c9e5..54b9eef3eea97189a032cccf56ec4e001cc77ec5 100644 (file)
@@ -2039,14 +2039,14 @@ static int rdt_get_tree(struct fs_context *fc)
 enum rdt_param {
        Opt_cdp,
        Opt_cdpl2,
-       Opt_mba_mpbs,
+       Opt_mba_mbps,
        nr__rdt_params
 };
 
 static const struct fs_parameter_spec rdt_param_specs[] = {
        fsparam_flag("cdp",             Opt_cdp),
        fsparam_flag("cdpl2",           Opt_cdpl2),
-       fsparam_flag("mba_mpbs",        Opt_mba_mpbs),
+       fsparam_flag("mba_MBps",        Opt_mba_mbps),
        {}
 };
 
@@ -2072,7 +2072,7 @@ static int rdt_parse_param(struct fs_context *fc, struct fs_parameter *param)
        case Opt_cdpl2:
                ctx->enable_cdpl2 = true;
                return 0;
-       case Opt_mba_mpbs:
+       case Opt_mba_mbps:
                if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
                        return -EINVAL;
                ctx->enable_mba_mbps = true;
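
The option string now matches its documented spelling. fs_parameter matching is an exact, case-sensitive string compare, so only the corrected form selects the software memory-bandwidth-in-MBps controller, e.g. mount -t resctrl resctrl -o mba_MBps /sys/fs/resctrl on Intel systems; the old misspelling mba_mpbs never matched the documentation.
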
index c338984c850d28a1213e46f86efc06d425115660..d0d5dd44b4f478524cc959cefb245695d9e40894 100644 (file)
@@ -2331,24 +2331,18 @@ static int em_lseg(struct x86_emulate_ctxt *ctxt)
 
 static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
 {
+#ifdef CONFIG_X86_64
        u32 eax, ebx, ecx, edx;
 
        eax = 0x80000001;
        ecx = 0;
        ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
        return edx & bit(X86_FEATURE_LM);
+#else
+       return false;
+#endif
 }
 
-#define GET_SMSTATE(type, smbase, offset)                                \
-       ({                                                                \
-        type __val;                                                      \
-        int r = ctxt->ops->read_phys(ctxt, smbase + offset, &__val,      \
-                                     sizeof(__val));                     \
-        if (r != X86EMUL_CONTINUE)                                       \
-                return X86EMUL_UNHANDLEABLE;                             \
-        __val;                                                           \
-       })
-
 static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
 {
        desc->g    = (flags >> 23) & 1;
@@ -2361,27 +2355,30 @@ static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
        desc->type = (flags >>  8) & 15;
 }
 
-static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
+static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, const char *smstate,
+                          int n)
 {
        struct desc_struct desc;
        int offset;
        u16 selector;
 
-       selector = GET_SMSTATE(u32, smbase, 0x7fa8 + n * 4);
+       selector = GET_SMSTATE(u32, smstate, 0x7fa8 + n * 4);
 
        if (n < 3)
                offset = 0x7f84 + n * 12;
        else
                offset = 0x7f2c + (n - 3) * 12;
 
-       set_desc_base(&desc,      GET_SMSTATE(u32, smbase, offset + 8));
-       set_desc_limit(&desc,     GET_SMSTATE(u32, smbase, offset + 4));
-       rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, offset));
+       set_desc_base(&desc,      GET_SMSTATE(u32, smstate, offset + 8));
+       set_desc_limit(&desc,     GET_SMSTATE(u32, smstate, offset + 4));
+       rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, offset));
        ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
        return X86EMUL_CONTINUE;
 }
 
-static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
+#ifdef CONFIG_X86_64
+static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, const char *smstate,
+                          int n)
 {
        struct desc_struct desc;
        int offset;
@@ -2390,15 +2387,16 @@ static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
 
        offset = 0x7e00 + n * 16;
 
-       selector =                GET_SMSTATE(u16, smbase, offset);
-       rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smbase, offset + 2) << 8);
-       set_desc_limit(&desc,     GET_SMSTATE(u32, smbase, offset + 4));
-       set_desc_base(&desc,      GET_SMSTATE(u32, smbase, offset + 8));
-       base3 =                   GET_SMSTATE(u32, smbase, offset + 12);
+       selector =                GET_SMSTATE(u16, smstate, offset);
+       rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smstate, offset + 2) << 8);
+       set_desc_limit(&desc,     GET_SMSTATE(u32, smstate, offset + 4));
+       set_desc_base(&desc,      GET_SMSTATE(u32, smstate, offset + 8));
+       base3 =                   GET_SMSTATE(u32, smstate, offset + 12);
 
        ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
        return X86EMUL_CONTINUE;
 }
+#endif
 
 static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
                                    u64 cr0, u64 cr3, u64 cr4)
@@ -2445,7 +2443,8 @@ static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
        return X86EMUL_CONTINUE;
 }
 
-static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
+static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
+                            const char *smstate)
 {
        struct desc_struct desc;
        struct desc_ptr dt;
@@ -2453,53 +2452,55 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
        u32 val, cr0, cr3, cr4;
        int i;
 
-       cr0 =                      GET_SMSTATE(u32, smbase, 0x7ffc);
-       cr3 =                      GET_SMSTATE(u32, smbase, 0x7ff8);
-       ctxt->eflags =             GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED;
-       ctxt->_eip =               GET_SMSTATE(u32, smbase, 0x7ff0);
+       cr0 =                      GET_SMSTATE(u32, smstate, 0x7ffc);
+       cr3 =                      GET_SMSTATE(u32, smstate, 0x7ff8);
+       ctxt->eflags =             GET_SMSTATE(u32, smstate, 0x7ff4) | X86_EFLAGS_FIXED;
+       ctxt->_eip =               GET_SMSTATE(u32, smstate, 0x7ff0);
 
        for (i = 0; i < 8; i++)
-               *reg_write(ctxt, i) = GET_SMSTATE(u32, smbase, 0x7fd0 + i * 4);
+               *reg_write(ctxt, i) = GET_SMSTATE(u32, smstate, 0x7fd0 + i * 4);
 
-       val = GET_SMSTATE(u32, smbase, 0x7fcc);
+       val = GET_SMSTATE(u32, smstate, 0x7fcc);
        ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
-       val = GET_SMSTATE(u32, smbase, 0x7fc8);
+       val = GET_SMSTATE(u32, smstate, 0x7fc8);
        ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
 
-       selector =                 GET_SMSTATE(u32, smbase, 0x7fc4);
-       set_desc_base(&desc,       GET_SMSTATE(u32, smbase, 0x7f64));
-       set_desc_limit(&desc,      GET_SMSTATE(u32, smbase, 0x7f60));
-       rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smbase, 0x7f5c));
+       selector =                 GET_SMSTATE(u32, smstate, 0x7fc4);
+       set_desc_base(&desc,       GET_SMSTATE(u32, smstate, 0x7f64));
+       set_desc_limit(&desc,      GET_SMSTATE(u32, smstate, 0x7f60));
+       rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smstate, 0x7f5c));
        ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);
 
-       selector =                 GET_SMSTATE(u32, smbase, 0x7fc0);
-       set_desc_base(&desc,       GET_SMSTATE(u32, smbase, 0x7f80));
-       set_desc_limit(&desc,      GET_SMSTATE(u32, smbase, 0x7f7c));
-       rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smbase, 0x7f78));
+       selector =                 GET_SMSTATE(u32, smstate, 0x7fc0);
+       set_desc_base(&desc,       GET_SMSTATE(u32, smstate, 0x7f80));
+       set_desc_limit(&desc,      GET_SMSTATE(u32, smstate, 0x7f7c));
+       rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smstate, 0x7f78));
        ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);
 
-       dt.address =               GET_SMSTATE(u32, smbase, 0x7f74);
-       dt.size =                  GET_SMSTATE(u32, smbase, 0x7f70);
+       dt.address =               GET_SMSTATE(u32, smstate, 0x7f74);
+       dt.size =                  GET_SMSTATE(u32, smstate, 0x7f70);
        ctxt->ops->set_gdt(ctxt, &dt);
 
-       dt.address =               GET_SMSTATE(u32, smbase, 0x7f58);
-       dt.size =                  GET_SMSTATE(u32, smbase, 0x7f54);
+       dt.address =               GET_SMSTATE(u32, smstate, 0x7f58);
+       dt.size =                  GET_SMSTATE(u32, smstate, 0x7f54);
        ctxt->ops->set_idt(ctxt, &dt);
 
        for (i = 0; i < 6; i++) {
-               int r = rsm_load_seg_32(ctxt, smbase, i);
+               int r = rsm_load_seg_32(ctxt, smstate, i);
                if (r != X86EMUL_CONTINUE)
                        return r;
        }
 
-       cr4 = GET_SMSTATE(u32, smbase, 0x7f14);
+       cr4 = GET_SMSTATE(u32, smstate, 0x7f14);
 
-       ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8));
+       ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7ef8));
 
        return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
 }
 
-static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
+#ifdef CONFIG_X86_64
+static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
+                            const char *smstate)
 {
        struct desc_struct desc;
        struct desc_ptr dt;
@@ -2509,43 +2510,43 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
        int i, r;
 
        for (i = 0; i < 16; i++)
-               *reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8);
+               *reg_write(ctxt, i) = GET_SMSTATE(u64, smstate, 0x7ff8 - i * 8);
 
-       ctxt->_eip   = GET_SMSTATE(u64, smbase, 0x7f78);
-       ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7f70) | X86_EFLAGS_FIXED;
+       ctxt->_eip   = GET_SMSTATE(u64, smstate, 0x7f78);
+       ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7f70) | X86_EFLAGS_FIXED;
 
-       val = GET_SMSTATE(u32, smbase, 0x7f68);
+       val = GET_SMSTATE(u32, smstate, 0x7f68);
        ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
-       val = GET_SMSTATE(u32, smbase, 0x7f60);
+       val = GET_SMSTATE(u32, smstate, 0x7f60);
        ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
 
-       cr0 =                       GET_SMSTATE(u64, smbase, 0x7f58);
-       cr3 =                       GET_SMSTATE(u64, smbase, 0x7f50);
-       cr4 =                       GET_SMSTATE(u64, smbase, 0x7f48);
-       ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00));
-       val =                       GET_SMSTATE(u64, smbase, 0x7ed0);
+       cr0 =                       GET_SMSTATE(u64, smstate, 0x7f58);
+       cr3 =                       GET_SMSTATE(u64, smstate, 0x7f50);
+       cr4 =                       GET_SMSTATE(u64, smstate, 0x7f48);
+       ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7f00));
+       val =                       GET_SMSTATE(u64, smstate, 0x7ed0);
        ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);
 
-       selector =                  GET_SMSTATE(u32, smbase, 0x7e90);
-       rsm_set_desc_flags(&desc,   GET_SMSTATE(u32, smbase, 0x7e92) << 8);
-       set_desc_limit(&desc,       GET_SMSTATE(u32, smbase, 0x7e94));
-       set_desc_base(&desc,        GET_SMSTATE(u32, smbase, 0x7e98));
-       base3 =                     GET_SMSTATE(u32, smbase, 0x7e9c);
+       selector =                  GET_SMSTATE(u32, smstate, 0x7e90);
+       rsm_set_desc_flags(&desc,   GET_SMSTATE(u32, smstate, 0x7e92) << 8);
+       set_desc_limit(&desc,       GET_SMSTATE(u32, smstate, 0x7e94));
+       set_desc_base(&desc,        GET_SMSTATE(u32, smstate, 0x7e98));
+       base3 =                     GET_SMSTATE(u32, smstate, 0x7e9c);
        ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);
 
-       dt.size =                   GET_SMSTATE(u32, smbase, 0x7e84);
-       dt.address =                GET_SMSTATE(u64, smbase, 0x7e88);
+       dt.size =                   GET_SMSTATE(u32, smstate, 0x7e84);
+       dt.address =                GET_SMSTATE(u64, smstate, 0x7e88);
        ctxt->ops->set_idt(ctxt, &dt);
 
-       selector =                  GET_SMSTATE(u32, smbase, 0x7e70);
-       rsm_set_desc_flags(&desc,   GET_SMSTATE(u32, smbase, 0x7e72) << 8);
-       set_desc_limit(&desc,       GET_SMSTATE(u32, smbase, 0x7e74));
-       set_desc_base(&desc,        GET_SMSTATE(u32, smbase, 0x7e78));
-       base3 =                     GET_SMSTATE(u32, smbase, 0x7e7c);
+       selector =                  GET_SMSTATE(u32, smstate, 0x7e70);
+       rsm_set_desc_flags(&desc,   GET_SMSTATE(u32, smstate, 0x7e72) << 8);
+       set_desc_limit(&desc,       GET_SMSTATE(u32, smstate, 0x7e74));
+       set_desc_base(&desc,        GET_SMSTATE(u32, smstate, 0x7e78));
+       base3 =                     GET_SMSTATE(u32, smstate, 0x7e7c);
        ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);
 
-       dt.size =                   GET_SMSTATE(u32, smbase, 0x7e64);
-       dt.address =                GET_SMSTATE(u64, smbase, 0x7e68);
+       dt.size =                   GET_SMSTATE(u32, smstate, 0x7e64);
+       dt.address =                GET_SMSTATE(u64, smstate, 0x7e68);
        ctxt->ops->set_gdt(ctxt, &dt);
 
        r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
@@ -2553,37 +2554,49 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
                return r;
 
        for (i = 0; i < 6; i++) {
-               r = rsm_load_seg_64(ctxt, smbase, i);
+               r = rsm_load_seg_64(ctxt, smstate, i);
                if (r != X86EMUL_CONTINUE)
                        return r;
        }
 
        return X86EMUL_CONTINUE;
 }
+#endif
 
 static int em_rsm(struct x86_emulate_ctxt *ctxt)
 {
        unsigned long cr0, cr4, efer;
+       char buf[512];
        u64 smbase;
        int ret;
 
        if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
                return emulate_ud(ctxt);
 
+       smbase = ctxt->ops->get_smbase(ctxt);
+
+       ret = ctxt->ops->read_phys(ctxt, smbase + 0xfe00, buf, sizeof(buf));
+       if (ret != X86EMUL_CONTINUE)
+               return X86EMUL_UNHANDLEABLE;
+
+       if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
+               ctxt->ops->set_nmi_mask(ctxt, false);
+
+       ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
+               ~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
+
        /*
         * Get back to real mode, to prepare a safe state in which to load
         * CR0/CR3/CR4/EFER.  It's all a bit more complicated if the vCPU
         * supports long mode.
         */
-       cr4 = ctxt->ops->get_cr(ctxt, 4);
        if (emulator_has_longmode(ctxt)) {
                struct desc_struct cs_desc;
 
                /* Zero CR4.PCIDE before CR0.PG.  */
-               if (cr4 & X86_CR4_PCIDE) {
+               cr4 = ctxt->ops->get_cr(ctxt, 4);
+               if (cr4 & X86_CR4_PCIDE)
                        ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
-                       cr4 &= ~X86_CR4_PCIDE;
-               }
 
                /* A 32-bit code segment is required to clear EFER.LMA.  */
                memset(&cs_desc, 0, sizeof(cs_desc));
@@ -2597,39 +2610,39 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
        if (cr0 & X86_CR0_PE)
                ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
 
-       /* Now clear CR4.PAE (which must be done before clearing EFER.LME).  */
-       if (cr4 & X86_CR4_PAE)
-               ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
-
-       /* And finally go back to 32-bit mode.  */
-       efer = 0;
-       ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
+       if (emulator_has_longmode(ctxt)) {
+               /* Clear CR4.PAE before clearing EFER.LME. */
+               cr4 = ctxt->ops->get_cr(ctxt, 4);
+               if (cr4 & X86_CR4_PAE)
+                       ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
 
-       smbase = ctxt->ops->get_smbase(ctxt);
+               /* And finally go back to 32-bit mode.  */
+               efer = 0;
+               ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
+       }
 
        /*
         * Give pre_leave_smm() a chance to make ISA-specific changes to the
         * vCPU state (e.g. enter guest mode) before loading state from the SMM
         * state-save area.
         */
-       if (ctxt->ops->pre_leave_smm(ctxt, smbase))
+       if (ctxt->ops->pre_leave_smm(ctxt, buf))
                return X86EMUL_UNHANDLEABLE;
 
+#ifdef CONFIG_X86_64
        if (emulator_has_longmode(ctxt))
-               ret = rsm_load_state_64(ctxt, smbase + 0x8000);
+               ret = rsm_load_state_64(ctxt, buf);
        else
-               ret = rsm_load_state_32(ctxt, smbase + 0x8000);
+#endif
+               ret = rsm_load_state_32(ctxt, buf);
 
        if (ret != X86EMUL_CONTINUE) {
                /* FIXME: should triple fault */
                return X86EMUL_UNHANDLEABLE;
        }
 
-       if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
-               ctxt->ops->set_nmi_mask(ctxt, false);
+       ctxt->ops->post_leave_smm(ctxt);
 
-       ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
-               ~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
        return X86EMUL_CONTINUE;
 }
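
The rework makes em_rsm() read the SMRAM state-save area exactly once, up front, and every later consumer (the vendor pre_leave_smm() hook and rsm_load_state_32/64()) parses that snapshot instead of re-reading guest memory. A rough outline of the resulting order (simplified; step numbering is illustrative):

        /*
         * 1. buf = 512 bytes at smbase + 0xfe00        (single read_phys)
         * 2. clear SMM hflags, unmask NMIs when not in an NMI-nested SMM
         * 3. drop to a safe mode for the CR0/CR3/CR4/EFER loads
         * 4. ops->pre_leave_smm(ctxt, buf)             vendor code sees buf
         * 5. rsm_load_state_64(ctxt, buf) or rsm_load_state_32(ctxt, buf)
         * 6. ops->post_leave_smm(ctxt)
         */
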
 
index 27c43525a05f1afabeb705b27f955eba5fe5356d..421899f6ad7bfe28237f37bcee6fbee0485eb510 100644 (file)
@@ -526,7 +526,9 @@ static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
                new_config.enable = 0;
        stimer->config.as_uint64 = new_config.as_uint64;
 
-       stimer_mark_pending(stimer, false);
+       if (stimer->config.enable)
+               stimer_mark_pending(stimer, false);
+
        return 0;
 }
 
@@ -542,7 +544,10 @@ static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
                stimer->config.enable = 0;
        else if (stimer->config.auto_enable)
                stimer->config.enable = 1;
-       stimer_mark_pending(stimer, false);
+
+       if (stimer->config.enable)
+               stimer_mark_pending(stimer, false);
+
        return 0;
 }
 
index 991fdf7fc17fbd9e1a4cab99d688a7af820d397c..9bf70cf845648f5e66143440166d57c5fd287bf9 100644 (file)
@@ -138,6 +138,7 @@ static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
                if (offset <= max_apic_id) {
                        u8 cluster_size = min(max_apic_id - offset + 1, 16U);
 
+                       offset = array_index_nospec(offset, map->max_apic_id + 1);
                        *cluster = &map->phys_map[offset];
                        *mask = dest_id & (0xffff >> (16 - cluster_size));
                } else {
@@ -901,7 +902,8 @@ static inline bool kvm_apic_map_get_dest_lapic(struct kvm *kvm,
                if (irq->dest_id > map->max_apic_id) {
                        *bitmap = 0;
                } else {
-                       *dst = &map->phys_map[irq->dest_id];
+                       u32 dest_id = array_index_nospec(irq->dest_id, map->max_apic_id + 1);
+                       *dst = &map->phys_map[dest_id];
                        *bitmap = 1;
                }
                return true;
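
Both hunks apply the usual Spectre-v1 hardening: the guest-influenced index is clamped under speculation before the dependent load from map->phys_map. The pattern, as a self-contained sketch (lookup, struct entry, table and size are placeholder names; array_index_nospec() comes from <linux/nospec.h>):

        #include <linux/nospec.h>

        struct entry { int val; };

        static struct entry *lookup(struct entry *table, unsigned long idx,
                                    unsigned long size)
        {
                if (idx >= size)                        /* architectural bound */
                        return NULL;
                idx = array_index_nospec(idx, size);    /* speculative bound */
                return &table[idx];
        }
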
index 7837ab001d806f2f3ffd3a56e30d1bae916f7b03..e10962dfc2032d982f124070b88f7d625d2b8f0b 100644 (file)
@@ -182,7 +182,7 @@ struct kvm_shadow_walk_iterator {
 
 static const union kvm_mmu_page_role mmu_base_role_mask = {
        .cr0_wp = 1,
-       .cr4_pae = 1,
+       .gpte_is_8_bytes = 1,
        .nxe = 1,
        .smep_andnot_wp = 1,
        .smap_andnot_wp = 1,
@@ -2007,7 +2007,7 @@ static int is_empty_shadow_page(u64 *spt)
  * aggregate version in order to make the slab shrinker
  * faster
  */
-static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr)
+static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, unsigned long nr)
 {
        kvm->arch.n_used_mmu_pages += nr;
        percpu_counter_add(&kvm_total_used_mmu_pages, nr);
@@ -2205,6 +2205,7 @@ static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
                                    struct list_head *invalid_list);
 
+
 #define for_each_valid_sp(_kvm, _sp, _gfn)                             \
        hlist_for_each_entry(_sp,                                       \
          &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \
@@ -2215,12 +2216,17 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
        for_each_valid_sp(_kvm, _sp, _gfn)                              \
                if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else
 
+static inline bool is_ept_sp(struct kvm_mmu_page *sp)
+{
+       return sp->role.cr0_wp && sp->role.smap_andnot_wp;
+}
+
 /* @sp->gfn should be write-protected at the call site */
 static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                            struct list_head *invalid_list)
 {
-       if (sp->role.cr4_pae != !!is_pae(vcpu)
-           || vcpu->arch.mmu->sync_page(vcpu, sp) == 0) {
+       if ((!is_ept_sp(sp) && sp->role.gpte_is_8_bytes != !!is_pae(vcpu)) ||
+           vcpu->arch.mmu->sync_page(vcpu, sp) == 0) {
                kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
                return false;
        }
@@ -2232,7 +2238,7 @@ static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
                                        struct list_head *invalid_list,
                                        bool remote_flush)
 {
-       if (!remote_flush && !list_empty(invalid_list))
+       if (!remote_flush && list_empty(invalid_list))
                return false;
 
        if (!list_empty(invalid_list))
@@ -2423,7 +2429,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
        role.level = level;
        role.direct = direct;
        if (role.direct)
-               role.cr4_pae = 0;
+               role.gpte_is_8_bytes = true;
        role.access = access;
        if (!vcpu->arch.mmu->direct_map
            && vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) {
@@ -2757,7 +2763,7 @@ static bool prepare_zap_oldest_mmu_page(struct kvm *kvm,
  * Changing the number of mmu pages allocated to the vm
  * Note: if goal_nr_mmu_pages is too small, you will get a deadlock
  */
-void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
+void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
 {
        LIST_HEAD(invalid_list);
 
@@ -4794,7 +4800,6 @@ static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu,
 
        role.base.access = ACC_ALL;
        role.base.nxe = !!is_nx(vcpu);
-       role.base.cr4_pae = !!is_pae(vcpu);
        role.base.cr0_wp = is_write_protection(vcpu);
        role.base.smm = is_smm(vcpu);
        role.base.guest_mode = is_guest_mode(vcpu);
@@ -4815,6 +4820,7 @@ kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
        role.base.ad_disabled = (shadow_accessed_mask == 0);
        role.base.level = kvm_x86_ops->get_tdp_level(vcpu);
        role.base.direct = true;
+       role.base.gpte_is_8_bytes = true;
 
        return role;
 }
@@ -4879,6 +4885,7 @@ kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
        role.base.smap_andnot_wp = role.ext.cr4_smap &&
                !is_write_protection(vcpu);
        role.base.direct = !is_paging(vcpu);
+       role.base.gpte_is_8_bytes = !!is_pae(vcpu);
 
        if (!is_long_mode(vcpu))
                role.base.level = PT32E_ROOT_LEVEL;
@@ -4918,18 +4925,26 @@ static union kvm_mmu_role
 kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
                                   bool execonly)
 {
-       union kvm_mmu_role role;
+       union kvm_mmu_role role = {0};
 
-       /* Base role is inherited from root_mmu */
-       role.base.word = vcpu->arch.root_mmu.mmu_role.base.word;
-       role.ext = kvm_calc_mmu_role_ext(vcpu);
+       /* SMM flag is inherited from root_mmu */
+       role.base.smm = vcpu->arch.root_mmu.mmu_role.base.smm;
 
        role.base.level = PT64_ROOT_4LEVEL;
+       role.base.gpte_is_8_bytes = true;
        role.base.direct = false;
        role.base.ad_disabled = !accessed_dirty;
        role.base.guest_mode = true;
        role.base.access = ACC_ALL;
 
+       /*
+        * WP=1 and NOT_WP=1 is an impossible combination, use WP and the
+        * SMAP variation to denote shadow EPT entries.
+        */
+       role.base.cr0_wp = true;
+       role.base.smap_andnot_wp = true;
+
+       role.ext = kvm_calc_mmu_role_ext(vcpu);
        role.ext.execonly = execonly;
 
        return role;
@@ -5179,7 +5194,7 @@ static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa,
                 gpa, bytes, sp->role.word);
 
        offset = offset_in_page(gpa);
-       pte_size = sp->role.cr4_pae ? 8 : 4;
+       pte_size = sp->role.gpte_is_8_bytes ? 8 : 4;
 
        /*
         * Sometimes, the OS only writes the last one bytes to update status
@@ -5203,7 +5218,7 @@ static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
        page_offset = offset_in_page(gpa);
        level = sp->role.level;
        *nspte = 1;
-       if (!sp->role.cr4_pae) {
+       if (!sp->role.gpte_is_8_bytes) {
                page_offset <<= 1;      /* 32->64 */
                /*
                 * A 32-bit pde maps 4MB while the shadow pdes map
@@ -5393,10 +5408,12 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
         * This can happen if a guest gets a page-fault on data access but the HW
         * table walker is not able to read the instruction page (e.g. instruction
         * page is not present in memory). In those cases we simply restart the
-        * guest.
+        * guest, with the exception of AMD Erratum 1096, which is unrecoverable.
         */
-       if (unlikely(insn && !insn_len))
-               return 1;
+       if (unlikely(insn && !insn_len)) {
+               if (!kvm_x86_ops->need_emulation_on_page_fault(vcpu))
+                       return 1;
+       }
 
        er = x86_emulate_instruction(vcpu, cr2, emulation_type, insn, insn_len);
 
@@ -5509,7 +5526,9 @@ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
 
                if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
                        if (flush && lock_flush_tlb) {
-                               kvm_flush_remote_tlbs(kvm);
+                               kvm_flush_remote_tlbs_with_address(kvm,
+                                               start_gfn,
+                                               iterator.gfn - start_gfn + 1);
                                flush = false;
                        }
                        cond_resched_lock(&kvm->mmu_lock);
@@ -5517,7 +5536,8 @@ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
        }
 
        if (flush && lock_flush_tlb) {
-               kvm_flush_remote_tlbs(kvm);
+               kvm_flush_remote_tlbs_with_address(kvm, start_gfn,
+                                                  end_gfn - start_gfn + 1);
                flush = false;
        }
 
@@ -6011,10 +6031,10 @@ int kvm_mmu_module_init(void)
 /*
  * Calculate mmu pages needed for kvm.
  */
-unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
+unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm)
 {
-       unsigned int nr_mmu_pages;
-       unsigned int  nr_pages = 0;
+       unsigned long nr_mmu_pages;
+       unsigned long nr_pages = 0;
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;
        int i;
@@ -6027,8 +6047,7 @@ unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
        }
 
        nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
-       nr_mmu_pages = max(nr_mmu_pages,
-                          (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
+       nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES);
 
        return nr_mmu_pages;
 }
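
Worked example of the sizing rule: a 4 GiB guest has 1048576 4-KiB pages, so KVM_PERMILLE_MMU_PAGES (20) allots 1048576 * 20 / 1000 = 20971 shadow pages, well above the KVM_MIN_ALLOC_MMU_PAGES floor of 64. The widening to unsigned long matters because the intermediate product nr_pages * 20 no longer fits in 32 bits once a guest passes roughly 800 GiB of memory, which is the wrap this hunk fixes.
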
index bbdc60f2fae89beb34c72716d9e7eb9c33584651..54c2a377795be6920bee9676e58555110c3a56b9 100644 (file)
@@ -64,7 +64,7 @@ bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
 int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
                                u64 fault_address, char *insn, int insn_len);
 
-static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
+static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm)
 {
        if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
                return kvm->arch.n_max_mmu_pages -
index 9f6c855a00439a58f5c4ea58e099d935d65e22d9..dd30dccd2ad5e250aef10e889e150011fece3468 100644 (file)
                                                                        \
        role.word = __entry->role;                                      \
                                                                        \
-       trace_seq_printf(p, "sp gfn %llx l%u%s q%u%s %s%s"              \
+       trace_seq_printf(p, "sp gfn %llx l%u %u-byte q%u%s %s%s"        \
                         " %snxe %sad root %u %s%c",                    \
                         __entry->gfn, role.level,                      \
-                        role.cr4_pae ? " pae" : "",                    \
+                        role.gpte_is_8_bytes ? 8 : 4,                  \
                         role.quadrant,                                 \
                         role.direct ? " direct" : "",                  \
                         access_str[role.access],                       \
index 58ead7db71a312764b56d9f242e84820239eeb93..e39741997893a977fdda077ff637bf465fbb1748 100644 (file)
@@ -281,9 +281,13 @@ static int kvm_pmu_rdpmc_vmware(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
 int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
 {
        bool fast_mode = idx & (1u << 31);
+       struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;
        u64 ctr_val;
 
+       if (!pmu->version)
+               return 1;
+
        if (is_vmware_backdoor_pmc(idx))
                return kvm_pmu_rdpmc_vmware(vcpu, idx, data);
 
index b5b128a0a05124d275af1f103fe3e40315df80f5..406b558abfef7379eb46bd2de18e5d6890079eb9 100644 (file)
@@ -262,6 +262,7 @@ struct amd_svm_iommu_ir {
 };
 
 #define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK   (0xFF)
+#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT                        31
 #define AVIC_LOGICAL_ID_ENTRY_VALID_MASK               (1 << 31)
 
 #define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK   (0xFFULL)
@@ -2692,6 +2693,7 @@ static int npf_interception(struct vcpu_svm *svm)
 static int db_interception(struct vcpu_svm *svm)
 {
        struct kvm_run *kvm_run = svm->vcpu.run;
+       struct kvm_vcpu *vcpu = &svm->vcpu;
 
        if (!(svm->vcpu.guest_debug &
              (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
@@ -2702,6 +2704,8 @@ static int db_interception(struct vcpu_svm *svm)
 
        if (svm->nmi_singlestep) {
                disable_nmi_singlestep(svm);
+               /* Make sure we check for pending NMIs upon entry */
+               kvm_make_request(KVM_REQ_EVENT, vcpu);
        }
 
        if (svm->vcpu.guest_debug &
@@ -4517,14 +4521,25 @@ static int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
                kvm_lapic_reg_write(apic, APIC_ICR, icrl);
                break;
        case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: {
+               int i;
+               struct kvm_vcpu *vcpu;
+               struct kvm *kvm = svm->vcpu.kvm;
                struct kvm_lapic *apic = svm->vcpu.arch.apic;
 
                /*
-                * Update ICR high and low, then emulate sending IPI,
-                * which is handled when writing APIC_ICR.
+                * At this point, we expect that the AVIC HW has already
+                * set the appropriate IRR bits on the valid target
+                * vcpus. So, we just need to kick the appropriate vcpu.
                 */
-               kvm_lapic_reg_write(apic, APIC_ICR2, icrh);
-               kvm_lapic_reg_write(apic, APIC_ICR, icrl);
+               kvm_for_each_vcpu(i, vcpu, kvm) {
+                       bool m = kvm_apic_match_dest(vcpu, apic,
+                                                    icrl & KVM_APIC_SHORT_MASK,
+                                                    GET_APIC_DEST_FIELD(icrh),
+                                                    icrl & KVM_APIC_DEST_MASK);
+
+                       if (m && !avic_vcpu_is_running(vcpu))
+                               kvm_vcpu_wake_up(vcpu);
+               }
                break;
        }
        case AVIC_IPI_FAILURE_INVALID_TARGET:
@@ -4596,7 +4611,7 @@ static void avic_invalidate_logical_id_entry(struct kvm_vcpu *vcpu)
        u32 *entry = avic_get_logical_id_entry(vcpu, svm->ldr_reg, flat);
 
        if (entry)
-               WRITE_ONCE(*entry, (u32) ~AVIC_LOGICAL_ID_ENTRY_VALID_MASK);
+               clear_bit(AVIC_LOGICAL_ID_ENTRY_VALID_BIT, (unsigned long *)entry);
 }
 
 static int avic_handle_ldr_update(struct kvm_vcpu *vcpu)
@@ -5621,6 +5636,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
        svm->vmcb->save.cr2 = vcpu->arch.cr2;
 
        clgi();
+       kvm_load_guest_xcr0(vcpu);
 
        /*
         * If this vCPU has touched SPEC_CTRL, restore the guest's value if
@@ -5766,6 +5782,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
        if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
                kvm_before_interrupt(&svm->vcpu);
 
+       kvm_put_guest_xcr0(vcpu);
        stgi();
 
        /* Any pending NMI will happen here */
@@ -6215,32 +6232,24 @@ static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
        return 0;
 }
 
-static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
+static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb *nested_vmcb;
        struct page *page;
-       struct {
-               u64 guest;
-               u64 vmcb;
-       } svm_state_save;
-       int ret;
+       u64 guest;
+       u64 vmcb;
 
-       ret = kvm_vcpu_read_guest(vcpu, smbase + 0xfed8, &svm_state_save,
-                                 sizeof(svm_state_save));
-       if (ret)
-               return ret;
+       guest = GET_SMSTATE(u64, smstate, 0x7ed8);
+       vmcb = GET_SMSTATE(u64, smstate, 0x7ee0);
 
-       if (svm_state_save.guest) {
-               vcpu->arch.hflags &= ~HF_SMM_MASK;
-               nested_vmcb = nested_svm_map(svm, svm_state_save.vmcb, &page);
-               if (nested_vmcb)
-                       enter_svm_guest_mode(svm, svm_state_save.vmcb, nested_vmcb, page);
-               else
-                       ret = 1;
-               vcpu->arch.hflags |= HF_SMM_MASK;
+       if (guest) {
+               nested_vmcb = nested_svm_map(svm, vmcb, &page);
+               if (!nested_vmcb)
+                       return 1;
+               enter_svm_guest_mode(svm, vmcb, nested_vmcb, page);
        }
-       return ret;
+       return 0;
 }
 
 static int enable_smi_window(struct kvm_vcpu *vcpu)
@@ -6422,11 +6431,11 @@ static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
        return ret;
 }
 
-static int get_num_contig_pages(int idx, struct page **inpages,
-                               unsigned long npages)
+static unsigned long get_num_contig_pages(unsigned long idx,
+                               struct page **inpages, unsigned long npages)
 {
        unsigned long paddr, next_paddr;
-       int i = idx + 1, pages = 1;
+       unsigned long i = idx + 1, pages = 1;
 
        /* find the number of contiguous pages starting from idx */
        paddr = __sme_page_pa(inpages[idx]);
@@ -6445,12 +6454,12 @@ static int get_num_contig_pages(int idx, struct page **inpages,
 
 static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
 {
-       unsigned long vaddr, vaddr_end, next_vaddr, npages, size;
+       unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct kvm_sev_launch_update_data params;
        struct sev_data_launch_update_data *data;
        struct page **inpages;
-       int i, ret, pages;
+       int ret;
 
        if (!sev_guest(kvm))
                return -ENOTTY;
@@ -6799,7 +6808,8 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
        struct page **src_p, **dst_p;
        struct kvm_sev_dbg debug;
        unsigned long n;
-       int ret, size;
+       unsigned int size;
+       int ret;
 
        if (!sev_guest(kvm))
                return -ENOTTY;
@@ -6807,6 +6817,11 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
        if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
                return -EFAULT;
 
+       if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr)
+               return -EINVAL;
+       if (!debug.dst_uaddr)
+               return -EINVAL;
+
        vaddr = debug.src_uaddr;
        size = debug.len;
        vaddr_end = vaddr + size;
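
The first new check is an unsigned-overflow guard: src_uaddr + len wraps modulo 2^64, so a wrapped range is detectable as a sum smaller than its own base. For example, src_uaddr = 0xffffffffffff0000 with len = 0x20000 sums to 0x10000, which is less than src_uaddr, and the ioctl is rejected before any bogus range is walked.
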
@@ -6857,8 +6872,8 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
                                                     dst_vaddr,
                                                     len, &argp->error);
 
-               sev_unpin_memory(kvm, src_p, 1);
-               sev_unpin_memory(kvm, dst_p, 1);
+               sev_unpin_memory(kvm, src_p, n);
+               sev_unpin_memory(kvm, dst_p, n);
 
                if (ret)
                        goto err;
@@ -7098,6 +7113,36 @@ static int nested_enable_evmcs(struct kvm_vcpu *vcpu,
        return -ENODEV;
 }
 
+static bool svm_need_emulation_on_page_fault(struct kvm_vcpu *vcpu)
+{
+       bool is_user, smap;
+
+       is_user = svm_get_cpl(vcpu) == 3;
+       smap = !kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
+
+       /*
+        * Detect and work around Errata 1096 Fam_17h_00_0Fh.
+        *
+        * In a non-SEV guest, the hypervisor is able to read guest
+        * memory and decode the faulting instruction when insn_len is
+        * zero, so we return true to indicate that decoding is possible.
+        *
+        * In an SEV guest, however, guest memory is encrypted with a
+        * guest-specific key and the hypervisor cannot decode the
+        * instruction pointer, so the erratum cannot be worked around.
+        * Print an error and request that the guest be killed.
+        */
+       if (is_user && smap) {
+               if (!sev_guest(vcpu->kvm))
+                       return true;
+
+               pr_err_ratelimited("KVM: Guest triggered AMD Erratum 1096\n");
+               kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
+       }
+
+       return false;
+}
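
This is the SVM half of the need_emulation_on_page_fault() hook added to kvm_x86_ops earlier in this diff: on a zero-length instruction fetch it decides whether emulation should be attempted anyway. For a non-SEV guest the answer is yes, because the hypervisor can still read and decode the faulting instruction itself; for an SEV guest the encrypted memory rules that out, so the handler requests a triple fault (KVM_REQ_TRIPLE_FAULT) to have the guest torn down.
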
+
 static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
        .cpu_has_kvm_support = has_svm,
        .disabled_by_bios = is_disabled,
@@ -7231,6 +7276,8 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 
        .nested_enable_evmcs = nested_enable_evmcs,
        .nested_get_evmcs_version = nested_get_evmcs_version,
+
+       .need_emulation_on_page_fault = svm_need_emulation_on_page_fault,
 };
 
 static int __init svm_init(void)
index 6432d08c7de79ccbde654b7ab17c9649b75a25c2..4d47a2631d1fb46d9f913b59743cb5417d7401c6 100644 (file)
@@ -438,13 +438,13 @@ TRACE_EVENT(kvm_apic_ipi,
 );
 
 TRACE_EVENT(kvm_apic_accept_irq,
-           TP_PROTO(__u32 apicid, __u16 dm, __u8 tm, __u8 vec),
+           TP_PROTO(__u32 apicid, __u16 dm, __u16 tm, __u8 vec),
            TP_ARGS(apicid, dm, tm, vec),
 
        TP_STRUCT__entry(
                __field(        __u32,          apicid          )
                __field(        __u16,          dm              )
-               __field(        __u8,           tm              )
+               __field(        __u16,          tm              )
                __field(        __u8,           vec             )
        ),
 
index f24a2c2250706f24741e4503ed5ba60232b3613e..6401eb7ef19ce0e9f9258b617e001dfdac534a2a 100644 (file)
@@ -500,6 +500,17 @@ static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
        }
 }
 
+static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap)
+{
+       int msr;
+
+       for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
+               unsigned word = msr / BITS_PER_LONG;
+
+               msr_bitmap[word] = ~0;
+               msr_bitmap[word + (0x800 / sizeof(long))] = ~0;
+       }
+}
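
For the bitmap arithmetic: the MSR bitmap is a single 4-KiB page, with read-intercept bits for MSRs 0x0..0x1fff in the first kilobyte and the matching write-intercept bits 0x800 bytes (0x800 / sizeof(long) words) further in, which is what the second store targets. On 64-bit, MSR 0x800 lands in word 0x800 / 64 = 32 for reads and word 32 + 256 = 288 for writes; the 256-MSR x2APIC range covers four longs per direction, hence the "eight 8-byte writes" mentioned in the comment further down.
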
+
 /*
  * Merge L0's and L1's MSR bitmap, return false to indicate that
  * we do not use the hardware.
@@ -541,39 +552,44 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
                return false;
 
        msr_bitmap_l1 = (unsigned long *)kmap(page);
-       if (nested_cpu_has_apic_reg_virt(vmcs12)) {
-               /*
-                * L0 need not intercept reads for MSRs between 0x800 and 0x8ff, it
-                * just lets the processor take the value from the virtual-APIC page;
-                * take those 256 bits directly from the L1 bitmap.
-                */
-               for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
-                       unsigned word = msr / BITS_PER_LONG;
-                       msr_bitmap_l0[word] = msr_bitmap_l1[word];
-                       msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
-               }
-       } else {
-               for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
-                       unsigned word = msr / BITS_PER_LONG;
-                       msr_bitmap_l0[word] = ~0;
-                       msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
-               }
-       }
 
-       nested_vmx_disable_intercept_for_msr(
-               msr_bitmap_l1, msr_bitmap_l0,
-               X2APIC_MSR(APIC_TASKPRI),
-               MSR_TYPE_W);
+       /*
+        * To keep the control flow simple, pay eight 8-byte writes (sixteen
+        * 4-byte writes on 32-bit systems) up front to enable intercepts for
+        * the x2APIC MSR range and selectively disable them below.
+        */
+       enable_x2apic_msr_intercepts(msr_bitmap_l0);
+
+       if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
+               if (nested_cpu_has_apic_reg_virt(vmcs12)) {
+                       /*
+                        * L0 need not intercept reads for MSRs between 0x800
+                        * and 0x8ff, it just lets the processor take the value
+                        * from the virtual-APIC page; take those 256 bits
+                        * directly from the L1 bitmap.
+                        */
+                       for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
+                               unsigned word = msr / BITS_PER_LONG;
+
+                               msr_bitmap_l0[word] = msr_bitmap_l1[word];
+                       }
+               }
 
-       if (nested_cpu_has_vid(vmcs12)) {
-               nested_vmx_disable_intercept_for_msr(
-                       msr_bitmap_l1, msr_bitmap_l0,
-                       X2APIC_MSR(APIC_EOI),
-                       MSR_TYPE_W);
                nested_vmx_disable_intercept_for_msr(
                        msr_bitmap_l1, msr_bitmap_l0,
-                       X2APIC_MSR(APIC_SELF_IPI),
-                       MSR_TYPE_W);
+                       X2APIC_MSR(APIC_TASKPRI),
+                       MSR_TYPE_R | MSR_TYPE_W);
+
+               if (nested_cpu_has_vid(vmcs12)) {
+                       nested_vmx_disable_intercept_for_msr(
+                               msr_bitmap_l1, msr_bitmap_l0,
+                               X2APIC_MSR(APIC_EOI),
+                               MSR_TYPE_W);
+                       nested_vmx_disable_intercept_for_msr(
+                               msr_bitmap_l1, msr_bitmap_l0,
+                               X2APIC_MSR(APIC_SELF_IPI),
+                               MSR_TYPE_W);
+               }
        }
 
        if (spec_ctrl)
@@ -2585,6 +2601,11 @@ static int nested_check_host_control_regs(struct kvm_vcpu *vcpu,
            !nested_host_cr4_valid(vcpu, vmcs12->host_cr4) ||
            !nested_cr3_valid(vcpu, vmcs12->host_cr3))
                return -EINVAL;
+
+       if (is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu) ||
+           is_noncanonical_address(vmcs12->host_ia32_sysenter_eip, vcpu))
+               return -EINVAL;
+
        /*
         * If the load IA32_EFER VM-exit control is 1, bits reserved in the
         * IA32_EFER MSR must be 0 in the field for that register. In addition,
@@ -2852,20 +2873,27 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
                /*
                 * If translation failed, VM entry will fail because
                 * prepare_vmcs02 set VIRTUAL_APIC_PAGE_ADDR to -1ull.
-                * Failing the vm entry is _not_ what the processor
-                * does but it's basically the only possibility we
-                * have.  We could still enter the guest if CR8 load
-                * exits are enabled, CR8 store exits are enabled, and
-                * virtualize APIC access is disabled; in this case
-                * the processor would never use the TPR shadow and we
-                * could simply clear the bit from the execution
-                * control.  But such a configuration is useless, so
-                * let's keep the code simple.
                 */
                if (!is_error_page(page)) {
                        vmx->nested.virtual_apic_page = page;
                        hpa = page_to_phys(vmx->nested.virtual_apic_page);
                        vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, hpa);
+               } else if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING) &&
+                          nested_cpu_has(vmcs12, CPU_BASED_CR8_STORE_EXITING) &&
+                          !nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
+                       /*
+                        * The processor will never use the TPR shadow, simply
+                        * clear the bit from the execution control.  Such a
+                        * configuration is useless, but it happens in tests.
+                        * For any other configuration, failing the vm entry is
+                        * _not_ what the processor does but it's basically the
+                        * only possibility we have.
+                        */
+                       vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
+                                       CPU_BASED_TPR_SHADOW);
+               } else {
+                       printk("bad virtual-APIC page address\n");
+                       dump_vmcs();
                }
        }
 
@@ -3768,8 +3796,18 @@ static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
        vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW));
 
        nested_ept_uninit_mmu_context(vcpu);
-       vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
-       __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
+
+       /*
+        * This is only valid if EPT is in use, otherwise the vmcs01 GUEST_CR3
+        * points to shadow pages!  Fortunately we only get here after a WARN_ON
+        * if EPT is disabled, so a VMabort is perfectly fine.
+        */
+       if (enable_ept) {
+               vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
+               __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
+       } else {
+               nested_vmx_abort(vcpu, VMX_ABORT_VMCS_CORRUPTED);
+       }
 
        /*
         * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs
@@ -5717,6 +5755,14 @@ __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
 {
        int i;
 
+       /*
+        * Without EPT it is not possible to restore L1's CR3 and PDPTR on
+        * VMfail, because they are not available in vmcs01.  Just always
+        * use hardware checks.
+        */
+       if (!enable_ept)
+               nested_early_check = 1;
+
        if (!cpu_has_vmx_shadow_vmcs())
                enable_shadow_vmcs = 0;
        if (enable_shadow_vmcs) {
index c73375e01ab8c4ca52d5d87e2ca9b3648906485a..b4e7d645275a2153c42fa252cce8a8cbb930b59e 100644 (file)
@@ -1683,12 +1683,6 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 
                msr_info->data = to_vmx(vcpu)->spec_ctrl;
                break;
-       case MSR_IA32_ARCH_CAPABILITIES:
-               if (!msr_info->host_initiated &&
-                   !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES))
-                       return 1;
-               msr_info->data = to_vmx(vcpu)->arch_capabilities;
-               break;
        case MSR_IA32_SYSENTER_CS:
                msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
                break;
@@ -1895,11 +1889,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, MSR_IA32_PRED_CMD,
                                              MSR_TYPE_W);
                break;
-       case MSR_IA32_ARCH_CAPABILITIES:
-               if (!msr_info->host_initiated)
-                       return 1;
-               vmx->arch_capabilities = data;
-               break;
        case MSR_IA32_CR_PAT:
                if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
                        if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
@@ -4088,8 +4077,6 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
                ++vmx->nmsrs;
        }
 
-       vmx->arch_capabilities = kvm_get_arch_capabilities();
-
        vm_exit_controls_init(vmx, vmx_vmexit_ctrl());
 
        /* 22.2.1, 20.8.1 */
@@ -5616,7 +5603,7 @@ static void vmx_dump_dtsel(char *name, uint32_t limit)
               vmcs_readl(limit + GUEST_GDTR_BASE - GUEST_GDTR_LIMIT));
 }
 
-static void dump_vmcs(void)
+void dump_vmcs(void)
 {
        u32 vmentry_ctl = vmcs_read32(VM_ENTRY_CONTROLS);
        u32 vmexit_ctl = vmcs_read32(VM_EXIT_CONTROLS);
@@ -6423,6 +6410,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
        if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
                vmx_set_interrupt_shadow(vcpu, 0);
 
+       kvm_load_guest_xcr0(vcpu);
+
        if (static_cpu_has(X86_FEATURE_PKU) &&
            kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
            vcpu->arch.pkru != vmx->host_pkru)
@@ -6519,6 +6508,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
                        __write_pkru(vmx->host_pkru);
        }
 
+       kvm_put_guest_xcr0(vcpu);
+
        vmx->nested.nested_run_pending = 0;
        vmx->idt_vectoring_info = 0;
 
@@ -6865,6 +6856,30 @@ static void nested_vmx_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
        }
 }
 
+static bool guest_cpuid_has_pmu(struct kvm_vcpu *vcpu)
+{
+       struct kvm_cpuid_entry2 *entry;
+       union cpuid10_eax eax;
+
+       entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
+       if (!entry)
+               return false;
+
+       eax.full = entry->eax;
+       return (eax.split.version_id > 0);
+}
+
+static void nested_vmx_procbased_ctls_update(struct kvm_vcpu *vcpu)
+{
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+       bool pmu_enabled = guest_cpuid_has_pmu(vcpu);
+
+       if (pmu_enabled)
+               vmx->nested.msrs.procbased_ctls_high |= CPU_BASED_RDPMC_EXITING;
+       else
+               vmx->nested.msrs.procbased_ctls_high &= ~CPU_BASED_RDPMC_EXITING;
+}
+
 static void update_intel_pt_cfg(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -6953,6 +6968,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
        if (nested_vmx_allowed(vcpu)) {
                nested_vmx_cr_fixed1_bits_update(vcpu);
                nested_vmx_entry_exit_ctls_update(vcpu);
+               nested_vmx_procbased_ctls_update(vcpu);
        }
 
        if (boot_cpu_has(X86_FEATURE_INTEL_PT) &&
@@ -7382,7 +7398,7 @@ static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
        return 0;
 }
 
-static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
+static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        int ret;
@@ -7393,9 +7409,7 @@ static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
        }
 
        if (vmx->nested.smm.guest_mode) {
-               vcpu->arch.hflags &= ~HF_SMM_MASK;
                ret = nested_vmx_enter_non_root_mode(vcpu, false);
-               vcpu->arch.hflags |= HF_SMM_MASK;
                if (ret)
                        return ret;
 
@@ -7409,6 +7423,11 @@ static int enable_smi_window(struct kvm_vcpu *vcpu)
        return 0;
 }
 
+static bool vmx_need_emulation_on_page_fault(struct kvm_vcpu *vcpu)
+{
+       return false;
+}
+
 static __init int hardware_setup(void)
 {
        unsigned long host_bndcfgs;
@@ -7711,6 +7730,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
        .set_nested_state = NULL,
        .get_vmcs12_pages = NULL,
        .nested_enable_evmcs = NULL,
+       .need_emulation_on_page_fault = vmx_need_emulation_on_page_fault,
 };
 
 static void vmx_cleanup_l1d_flush(void)
index 1554cb45b3931a6de9d91b0092de1ff85d4c993f..f879529906b48cd84e99cc0f672210aaeaffeabd 100644 (file)
@@ -190,7 +190,6 @@ struct vcpu_vmx {
        u64                   msr_guest_kernel_gs_base;
 #endif
 
-       u64                   arch_capabilities;
        u64                   spec_ctrl;
 
        u32 vm_entry_controls_shadow;
@@ -518,4 +517,6 @@ static inline void decache_tsc_multiplier(struct vcpu_vmx *vmx)
        vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
 }
 
+void dump_vmcs(void);
+
 #endif /* __KVM_X86_VMX_H */
index 65e4559eef2fc8589e0a4277077e766ceead3994..a0d1fc80ac5a8407c123d8df12eb2215d4d70392 100644 (file)
@@ -800,7 +800,7 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
 }
 EXPORT_SYMBOL_GPL(kvm_lmsw);
 
-static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
+void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
 {
        if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
                        !vcpu->guest_xcr0_loaded) {
@@ -810,8 +810,9 @@ static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
                vcpu->guest_xcr0_loaded = 1;
        }
 }
+EXPORT_SYMBOL_GPL(kvm_load_guest_xcr0);
 
-static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
+void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
 {
        if (vcpu->guest_xcr0_loaded) {
                if (vcpu->arch.xcr0 != host_xcr0)
@@ -819,6 +820,7 @@ static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
                vcpu->guest_xcr0_loaded = 0;
        }
 }
+EXPORT_SYMBOL_GPL(kvm_put_guest_xcr0);
 
 static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 {
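
The two hunks above make kvm_load_guest_xcr0()/kvm_put_guest_xcr0() non-static so vmx_vcpu_run() can bracket the guest run itself. The underlying pattern is a guarded register swap: write the guest value only when it differs from the host's, and remember whether a restore is owed. Below is a self-contained sketch of that pattern, with plain assignment standing in for xsetbv and a flag for the CR4.OSXSAVE check; it is not the KVM API.

/* Guarded register-swap sketch; assignment stands in for xsetbv(). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t hw_xcr0, host_xcr0 = 0x7;

struct vcpu {
        uint64_t guest_xcr0;
        bool guest_xcr0_loaded;
        bool osxsave_enabled;   /* stands in for the CR4.OSXSAVE check */
};

static void load_guest_xcr0(struct vcpu *v)
{
        if (v->osxsave_enabled && !v->guest_xcr0_loaded) {
                if (v->guest_xcr0 != host_xcr0)
                        hw_xcr0 = v->guest_xcr0;        /* "xsetbv(guest)" */
                v->guest_xcr0_loaded = true;
        }
}

static void put_guest_xcr0(struct vcpu *v)
{
        if (v->guest_xcr0_loaded) {
                if (v->guest_xcr0 != host_xcr0)
                        hw_xcr0 = host_xcr0;            /* "xsetbv(host)" */
                v->guest_xcr0_loaded = false;
        }
}

int main(void)
{
        struct vcpu v = { .guest_xcr0 = 0x1f, .osxsave_enabled = true };

        load_guest_xcr0(&v);    /* enter guest with the guest XCR0 loaded */
        put_guest_xcr0(&v);     /* restore the host XCR0 on exit */
        printf("hw_xcr0=%#llx\n", (unsigned long long)hw_xcr0);
        return 0;
}
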
@@ -1125,7 +1127,7 @@ static u32 msrs_to_save[] = {
 #endif
        MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
        MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
-       MSR_IA32_SPEC_CTRL, MSR_IA32_ARCH_CAPABILITIES,
+       MSR_IA32_SPEC_CTRL,
        MSR_IA32_RTIT_CTL, MSR_IA32_RTIT_STATUS, MSR_IA32_RTIT_CR3_MATCH,
        MSR_IA32_RTIT_OUTPUT_BASE, MSR_IA32_RTIT_OUTPUT_MASK,
        MSR_IA32_RTIT_ADDR0_A, MSR_IA32_RTIT_ADDR0_B,
@@ -1158,6 +1160,7 @@ static u32 emulated_msrs[] = {
 
        MSR_IA32_TSC_ADJUST,
        MSR_IA32_TSCDEADLINE,
+       MSR_IA32_ARCH_CAPABILITIES,
        MSR_IA32_MISC_ENABLE,
        MSR_IA32_MCG_STATUS,
        MSR_IA32_MCG_CTL,
@@ -2443,6 +2446,11 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                if (msr_info->host_initiated)
                        vcpu->arch.microcode_version = data;
                break;
+       case MSR_IA32_ARCH_CAPABILITIES:
+               if (!msr_info->host_initiated)
+                       return 1;
+               vcpu->arch.arch_capabilities = data;
+               break;
        case MSR_EFER:
                return set_efer(vcpu, data);
        case MSR_K7_HWCR:
@@ -2747,6 +2755,12 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        case MSR_IA32_UCODE_REV:
                msr_info->data = vcpu->arch.microcode_version;
                break;
+       case MSR_IA32_ARCH_CAPABILITIES:
+               if (!msr_info->host_initiated &&
+                   !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES))
+                       return 1;
+               msr_info->data = vcpu->arch.arch_capabilities;
+               break;
        case MSR_IA32_TSC:
                msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + vcpu->arch.tsc_offset;
                break;
@@ -3081,7 +3095,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
                break;
        case KVM_CAP_NESTED_STATE:
                r = kvm_x86_ops->get_nested_state ?
-                       kvm_x86_ops->get_nested_state(NULL, 0, 0) : 0;
+                       kvm_x86_ops->get_nested_state(NULL, NULL, 0) : 0;
                break;
        default:
                break;
@@ -3516,7 +3530,7 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
        memset(&events->reserved, 0, sizeof(events->reserved));
 }
 
-static void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags);
+static void kvm_smm_changed(struct kvm_vcpu *vcpu);
 
 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
                                              struct kvm_vcpu_events *events)
@@ -3576,12 +3590,13 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
                vcpu->arch.apic->sipi_vector = events->sipi_vector;
 
        if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
-               u32 hflags = vcpu->arch.hflags;
-               if (events->smi.smm)
-                       hflags |= HF_SMM_MASK;
-               else
-                       hflags &= ~HF_SMM_MASK;
-               kvm_set_hflags(vcpu, hflags);
+               if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) {
+                       if (events->smi.smm)
+                               vcpu->arch.hflags |= HF_SMM_MASK;
+                       else
+                               vcpu->arch.hflags &= ~HF_SMM_MASK;
+                       kvm_smm_changed(vcpu);
+               }
 
                vcpu->arch.smi_pending = events->smi.pending;
 
@@ -4258,7 +4273,7 @@ static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
 }
 
 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
-                                         u32 kvm_nr_mmu_pages)
+                                        unsigned long kvm_nr_mmu_pages)
 {
        if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
                return -EINVAL;
@@ -4272,7 +4287,7 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
        return 0;
 }
 
-static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
+static unsigned long kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
 {
        return kvm->arch.n_max_mmu_pages;
 }
@@ -5946,12 +5961,18 @@ static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt)
 
 static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_flags)
 {
-       kvm_set_hflags(emul_to_vcpu(ctxt), emul_flags);
+       emul_to_vcpu(ctxt)->arch.hflags = emul_flags;
+}
+
+static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt,
+                                 const char *smstate)
+{
+       return kvm_x86_ops->pre_leave_smm(emul_to_vcpu(ctxt), smstate);
 }
 
-static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt, u64 smbase)
+static void emulator_post_leave_smm(struct x86_emulate_ctxt *ctxt)
 {
-       return kvm_x86_ops->pre_leave_smm(emul_to_vcpu(ctxt), smbase);
+       kvm_smm_changed(emul_to_vcpu(ctxt));
 }
 
 static const struct x86_emulate_ops emulate_ops = {
@@ -5994,6 +6015,7 @@ static const struct x86_emulate_ops emulate_ops = {
        .get_hflags          = emulator_get_hflags,
        .set_hflags          = emulator_set_hflags,
        .pre_leave_smm       = emulator_pre_leave_smm,
+       .post_leave_smm      = emulator_post_leave_smm,
 };
 
 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
@@ -6235,16 +6257,6 @@ static void kvm_smm_changed(struct kvm_vcpu *vcpu)
        kvm_mmu_reset_context(vcpu);
 }
 
-static void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags)
-{
-       unsigned changed = vcpu->arch.hflags ^ emul_flags;
-
-       vcpu->arch.hflags = emul_flags;
-
-       if (changed & HF_SMM_MASK)
-               kvm_smm_changed(vcpu);
-}
-
 static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
                                unsigned long *db)
 {
@@ -6523,14 +6535,27 @@ int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer);
 
+static int complete_fast_pio_out(struct kvm_vcpu *vcpu)
+{
+       vcpu->arch.pio.count = 0;
+
+       if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip)))
+               return 1;
+
+       return kvm_skip_emulated_instruction(vcpu);
+}
+
 static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size,
                            unsigned short port)
 {
        unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
        int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt,
                                            size, port, &val, 1);
-       /* do not return to emulator after return from userspace */
-       vcpu->arch.pio.count = 0;
+
+       if (!ret) {
+               vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu);
+               vcpu->arch.complete_userspace_io = complete_fast_pio_out;
+       }
        return ret;
 }
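
The fast PIO change above defers skipping the emulated instruction until userspace completes the I/O, and skips only if the vCPU is still at the recorded linear RIP. A toy model of that handshake follows; the names, the global rip, and the two-byte instruction length are illustrative, not KVM's.

/* Toy model of the deferred skip: snapshot the linear RIP when the OUT
 * exits to userspace, and advance RIP on completion only if the vCPU is
 * still on that instruction. */
#include <stdint.h>
#include <stdio.h>

static uint64_t rip = 0x1000;
static const uint64_t insn_len = 2;     /* pretend the OUT is 2 bytes */

struct pio_state {
        uint64_t linear_rip;
        int pending;
};

static void issue_pio_out(struct pio_state *s)
{
        s->linear_rip = rip;    /* remember where the I/O was issued */
        s->pending = 1;
}

static void complete_pio_out(struct pio_state *s)
{
        s->pending = 0;
        if (rip != s->linear_rip)
                return;         /* state changed meanwhile: don't skip */
        rip += insn_len;        /* safe to move past the instruction now */
}

int main(void)
{
        struct pio_state s;

        issue_pio_out(&s);
        complete_pio_out(&s);   /* runs after userspace finishes the I/O */
        printf("rip=%#llx\n", (unsigned long long)rip);
        return 0;
}
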
 
@@ -6541,6 +6566,11 @@ static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
        /* We should only ever be called with arch.pio.count equal to 1 */
        BUG_ON(vcpu->arch.pio.count != 1);
 
+       if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) {
+               vcpu->arch.pio.count = 0;
+               return 1;
+       }
+
        /* For size less than 4 we merge, else we zero extend */
        val = (vcpu->arch.pio.size < 4) ? kvm_register_read(vcpu, VCPU_REGS_RAX)
                                        : 0;
@@ -6553,7 +6583,7 @@ static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
                                 vcpu->arch.pio.port, &val, 1);
        kvm_register_write(vcpu, VCPU_REGS_RAX, val);
 
-       return 1;
+       return kvm_skip_emulated_instruction(vcpu);
 }
 
 static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
@@ -6572,6 +6602,7 @@ static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
                return ret;
        }
 
+       vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu);
        vcpu->arch.complete_userspace_io = complete_fast_pio_in;
 
        return 0;
@@ -6579,16 +6610,13 @@ static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
 
 int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in)
 {
-       int ret = kvm_skip_emulated_instruction(vcpu);
+       int ret;
 
-       /*
-        * TODO: we might be squashing a KVM_GUESTDBG_SINGLESTEP-triggered
-        * KVM_EXIT_DEBUG here.
-        */
        if (in)
-               return kvm_fast_pio_in(vcpu, size, port) && ret;
+               ret = kvm_fast_pio_in(vcpu, size, port);
        else
-               return kvm_fast_pio_out(vcpu, size, port) && ret;
+               ret = kvm_fast_pio_out(vcpu, size, port);
+       return ret && kvm_skip_emulated_instruction(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_fast_pio);
 
@@ -7413,9 +7441,9 @@ static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, char *buf)
        put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase);
 }
 
+#ifdef CONFIG_X86_64
 static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
 {
-#ifdef CONFIG_X86_64
        struct desc_ptr dt;
        struct kvm_segment seg;
        unsigned long val;
@@ -7465,10 +7493,8 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
 
        for (i = 0; i < 6; i++)
                enter_smm_save_seg_64(vcpu, buf, i);
-#else
-       WARN_ON_ONCE(1);
-#endif
 }
+#endif
 
 static void enter_smm(struct kvm_vcpu *vcpu)
 {
@@ -7479,9 +7505,11 @@ static void enter_smm(struct kvm_vcpu *vcpu)
 
        trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true);
        memset(buf, 0, 512);
+#ifdef CONFIG_X86_64
        if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
                enter_smm_save_state_64(vcpu, buf);
        else
+#endif
                enter_smm_save_state_32(vcpu, buf);
 
        /*
@@ -7539,8 +7567,10 @@ static void enter_smm(struct kvm_vcpu *vcpu)
        kvm_set_segment(vcpu, &ds, VCPU_SREG_GS);
        kvm_set_segment(vcpu, &ds, VCPU_SREG_SS);
 
+#ifdef CONFIG_X86_64
        if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
                kvm_x86_ops->set_efer(vcpu, 0);
+#endif
 
        kvm_update_cpuid(vcpu);
        kvm_mmu_reset_context(vcpu);
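
Hoisting the #ifdef out of enter_smm_save_state_64() and onto its callers makes the 64-bit save path compile away entirely on 32-bit builds rather than degrade into a WARN_ON_ONCE stub. The same gating pattern, reduced to a standalone example where BUILD_64BIT is a stand-in for CONFIG_X86_64:

/* Compile-time gating: the 64-bit helper exists only in 64-bit builds,
 * and every call site carries the same guard. */
#include <stdio.h>

#ifdef BUILD_64BIT
static void save_state_64(void) { puts("64-bit save area"); }
#endif

static void save_state_32(void) { puts("32-bit save area"); }

static void enter_smm(int guest_is_long_mode)
{
#ifdef BUILD_64BIT
        if (guest_is_long_mode)
                save_state_64();
        else
#endif
                save_state_32();
}

int main(void)
{
        enter_smm(0);   /* with -DBUILD_64BIT, enter_smm(1) takes the 64-bit path */
        return 0;
}
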
@@ -7837,8 +7867,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                goto cancel_injection;
        }
 
-       kvm_load_guest_xcr0(vcpu);
-
        if (req_immediate_exit) {
                kvm_make_request(KVM_REQ_EVENT, vcpu);
                kvm_x86_ops->request_immediate_exit(vcpu);
@@ -7891,8 +7919,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        vcpu->mode = OUTSIDE_GUEST_MODE;
        smp_wmb();
 
-       kvm_put_guest_xcr0(vcpu);
-
        kvm_before_interrupt(vcpu);
        kvm_x86_ops->handle_external_intr(vcpu);
        kvm_after_interrupt(vcpu);
@@ -8733,6 +8759,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
+       vcpu->arch.arch_capabilities = kvm_get_arch_capabilities();
        vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT;
        kvm_vcpu_mtrr_init(vcpu);
        vcpu_load(vcpu);
@@ -9429,13 +9456,9 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
                                const struct kvm_memory_slot *new,
                                enum kvm_mr_change change)
 {
-       int nr_mmu_pages = 0;
-
        if (!kvm->arch.n_requested_mmu_pages)
-               nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
-
-       if (nr_mmu_pages)
-               kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
+               kvm_mmu_change_mmu_pages(kvm,
+                               kvm_mmu_calculate_default_mmu_pages(kvm));
 
        /*
         * Dirty logging tracks sptes in 4k granularity, meaning that large
index 28406aa1136d7eb772ed712f9df34ffe14290e66..aedc5d0d4989b3fc7422c17e55fc6b65bfef06a3 100644 (file)
@@ -347,4 +347,6 @@ static inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
        __this_cpu_write(current_vcpu, NULL);
 }
 
+void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu);
+void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu);
 #endif
index db316571452145f50832ba56ff7fb49214d4ae02..dc726e07d8ba84a6ac790c8b0929e9235e25c2fc 100644 (file)
@@ -230,7 +230,7 @@ bool mmap_address_hint_valid(unsigned long addr, unsigned long len)
 /* Can we access it for direct reading/writing? Must be RAM: */
 int valid_phys_addr_range(phys_addr_t addr, size_t count)
 {
-       return addr + count <= __pa(high_memory);
+       return addr + count - 1 <= __pa(high_memory - 1);
 }
 
 /* Can we access it through mmap? Must be a valid physical address: */
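
The one-line fix above switches the range check to inclusive bounds. That matters when valid memory reaches the very top of the address space, where "one past the last byte" wraps to zero, and it also avoids translating the one-past-the-end address high_memory itself. A small demonstration in 32-bit arithmetic (illustrative types, not the kernel's):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t last_valid = 0xffffffffu;      /* RAM ends at the top */
        uint32_t end_excl   = last_valid + 1;   /* wraps to 0 */
        uint32_t addr = 0x1000, count = 0x1000;

        /* exclusive form rejects a perfectly valid access: prints 0 */
        printf("exclusive: %d\n", addr + count <= end_excl);
        /* inclusive form compares last bytes and accepts it: prints 1 */
        printf("inclusive: %d\n", addr + count - 1 <= last_valid);
        return 0;
}
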
index 458a0e2bcc57ca42a10964c5b62bc8bd11ca96ea..a25a9fd987a9e5dc2ba75d7b3c91c098ad7d7034 100644 (file)
@@ -449,7 +449,7 @@ void __init efi_free_boot_services(void)
                 */
                rm_size = real_mode_size_needed();
                if (rm_size && (start + rm_size) < (1<<20) && size >= rm_size) {
-                       set_real_mode_mem(start, rm_size);
+                       set_real_mode_mem(start);
                        start += rm_size;
                        size -= rm_size;
                }
index d10105825d57a7faee5f3221d04f0e77b9807e45..7dce39c8c034a8a9f2481aff02590a793ff86498 100644 (file)
@@ -15,15 +15,6 @@ u32 *trampoline_cr4_features;
 /* Hold the pgd entry used on booting additional CPUs */
 pgd_t trampoline_pgd_entry;
 
-void __init set_real_mode_mem(phys_addr_t mem, size_t size)
-{
-       void *base = __va(mem);
-
-       real_mode_header = (struct real_mode_header *) base;
-       printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n",
-              base, (unsigned long long)mem, size);
-}
-
 void __init reserve_real_mode(void)
 {
        phys_addr_t mem;
@@ -42,7 +33,7 @@ void __init reserve_real_mode(void)
        }
 
        memblock_reserve(mem, size);
-       set_real_mode_mem(mem, size);
+       set_real_mode_mem(mem);
 }
 
 static void __init setup_real_mode(void)
index 42b6cb3d16f7b9666baf44daf0a63757786d729b..3843198e03d4ba1b8772caeada4897d2cc1fbad1 100644 (file)
@@ -15,6 +15,7 @@ generic-y += irq_work.h
 generic-y += kdebug.h
 generic-y += kmap_types.h
 generic-y += kprobes.h
+generic-y += kvm_para.h
 generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
index f7dd895b2353e0510a7199896030c8527695c130..0c14018d1c2601a63a92b2f29be1270d9919220c 100644 (file)
@@ -187,15 +187,18 @@ struct thread_struct {
 
 /* Clearing a0 terminates the backtrace. */
 #define start_thread(regs, new_pc, new_sp) \
-       memset(regs, 0, sizeof(*regs)); \
-       regs->pc = new_pc; \
-       regs->ps = USER_PS_VALUE; \
-       regs->areg[1] = new_sp; \
-       regs->areg[0] = 0; \
-       regs->wmask = 1; \
-       regs->depc = 0; \
-       regs->windowbase = 0; \
-       regs->windowstart = 1;
+       do { \
+               memset((regs), 0, sizeof(*(regs))); \
+               (regs)->pc = (new_pc); \
+               (regs)->ps = USER_PS_VALUE; \
+               (regs)->areg[1] = (new_sp); \
+               (regs)->areg[0] = 0; \
+               (regs)->wmask = 1; \
+               (regs)->depc = 0; \
+               (regs)->windowbase = 0; \
+               (regs)->windowstart = 1; \
+               (regs)->syscall = NO_SYSCALL; \
+       } while (0)
 
 /* Forward declaration */
 struct task_struct;
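
Wrapping the start_thread() body in do { } while (0) and parenthesizing its arguments is the standard hygiene fix for multi-statement macros: without the wrapper, only the first statement falls under an unbraced if. A compilable illustration with hypothetical INIT_* macros:

#include <stdio.h>

#define INIT_BAD(x)  (x) = 1; (x) += 1
#define INIT_GOOD(x) do { (x) = 1; (x) += 1; } while (0)

int main(void)
{
        int a = 0, b = 0, cond = 0;

        if (cond)
                INIT_BAD(a);    /* only "(a) = 1" is guarded; "(a) += 1" always runs */
        if (cond)
                INIT_GOOD(b);   /* the entire body is guarded, as intended */

        printf("a=%d b=%d\n", a, b);    /* prints a=1 b=0 */
        return 0;
}
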
index a168bf81c7f4701a036abaa251fa5dce7fad6c19..91dc06d580603bfd8025eeda2acaf09bd2260378 100644 (file)
@@ -59,45 +59,24 @@ static inline void syscall_set_return_value(struct task_struct *task,
 
 static inline void syscall_get_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
        static const unsigned int reg[] = XTENSA_SYSCALL_ARGUMENT_REGS;
-       unsigned int j;
+       unsigned int i;
 
-       if (n == 0)
-               return;
-
-       WARN_ON_ONCE(i + n > SYSCALL_MAX_ARGS);
-
-       for (j = 0; j < n; ++j) {
-               if (i + j < SYSCALL_MAX_ARGS)
-                       args[j] = regs->areg[reg[i + j]];
-               else
-                       args[j] = 0;
-       }
+       for (i = 0; i < 6; ++i)
+               args[i] = regs->areg[reg[i]];
 }
 
 static inline void syscall_set_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         const unsigned long *args)
 {
        static const unsigned int reg[] = XTENSA_SYSCALL_ARGUMENT_REGS;
-       unsigned int j;
-
-       if (n == 0)
-               return;
-
-       if (WARN_ON_ONCE(i + n > SYSCALL_MAX_ARGS)) {
-               if (i < SYSCALL_MAX_ARGS)
-                       n = SYSCALL_MAX_ARGS - i;
-               else
-                       return;
-       }
+       unsigned int i;
 
-       for (j = 0; j < n; ++j)
-               regs->areg[reg[i + j]] = args[j];
+       for (i = 0; i < 6; ++i)
+               regs->areg[reg[i]] = args[i];
 }
 
 asmlinkage long xtensa_rt_sigreturn(struct pt_regs*);
index 8a7ad40be463656854310b85dbba06d5f9b8e189..7417847dc438e5ff6aff14f04094a1323d6b933f 100644 (file)
@@ -1,2 +1 @@
 generated-y += unistd_32.h
-generic-y += kvm_para.h
index e50f5124dc6f789c7457cb3b296d67f2b5cc76e1..e54af8b7e0f8c314830ae1ae5244f86af19a1682 100644 (file)
@@ -1860,6 +1860,8 @@ ENTRY(system_call)
        l32i    a7, a2, PT_SYSCALL
 
 1:
+       s32i    a7, a1, 4
+
        /* syscall = sys_call_table[syscall_nr] */
 
        movi    a4, sys_call_table
@@ -1893,8 +1895,12 @@ ENTRY(system_call)
        retw
 
 1:
+       l32i    a4, a1, 4
+       l32i    a3, a2, PT_SYSCALL
+       s32i    a4, a2, PT_SYSCALL
        mov     a6, a2
        call4   do_syscall_trace_leave
+       s32i    a3, a2, PT_SYSCALL
        retw
 
 ENDPROC(system_call)
index 174c11f13bba375472f77a02eca75b1408d5e2de..b9f82510c65019506ffb98f3f23ac494f7285efa 100644 (file)
@@ -253,10 +253,14 @@ static int return_address_cb(struct stackframe *frame, void *data)
        return 1;
 }
 
+/*
+ * level == 0 is for the return address from the caller of this function,
+ * not from this function itself.
+ */
 unsigned long return_address(unsigned level)
 {
        struct return_addr_data r = {
-               .skip = level + 1,
+               .skip = level,
        };
        walk_stackframe(stack_pointer(NULL), return_address_cb, &r);
        return r.addr;
index 2fb7d117222840da05f44cf7eed39348d27502e5..03678c4afc39b9e4ee94a3666b7ef630ad246d05 100644 (file)
@@ -33,7 +33,7 @@ static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages)
 
        pte = memblock_alloc_low(n_pages * sizeof(pte_t), PAGE_SIZE);
        if (!pte)
-               panic("%s: Failed to allocate %zu bytes align=%lx\n",
+               panic("%s: Failed to allocate %lu bytes align=%lx\n",
                      __func__, n_pages * sizeof(pte_t), PAGE_SIZE);
 
        for (i = 0; i < n_pages; ++i)
index 4c592496a16a21655dcd8616ae4b3181111b3956..dfb8cb0af13a872737e07647de307e9e44017f0d 100644 (file)
@@ -674,7 +674,7 @@ static bool bfq_symmetric_scenario(struct bfq_data *bfqd)
         * at least two nodes.
         */
        return !(varied_queue_weights || multiple_classes_busy
-#ifdef BFQ_GROUP_IOSCHED_ENABLED
+#ifdef CONFIG_BFQ_GROUP_IOSCHED
               || bfqd->num_groups_with_pending_reqs > 0
 #endif
                );
@@ -2822,7 +2822,7 @@ static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
        bfq_remove_request(q, rq);
 }
 
-static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+static bool __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
 {
        /*
         * If this bfqq is shared between multiple processes, check
@@ -2855,9 +2855,11 @@ static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
        /*
         * All in-service entities must have been properly deactivated
         * or requeued before executing the next function, which
-        * resets all in-service entites as no more in service.
+        * resets all in-service entities as no more in service. This
+        * may cause bfqq to be freed. If this happens, the next
+        * function returns true.
         */
-       __bfq_bfqd_reset_in_service(bfqd);
+       return __bfq_bfqd_reset_in_service(bfqd);
 }
 
 /**
@@ -3262,7 +3264,6 @@ void bfq_bfqq_expire(struct bfq_data *bfqd,
        bool slow;
        unsigned long delta = 0;
        struct bfq_entity *entity = &bfqq->entity;
-       int ref;
 
        /*
         * Check whether the process is slow (see bfq_bfqq_is_slow).
@@ -3347,10 +3348,8 @@ void bfq_bfqq_expire(struct bfq_data *bfqd,
         * reason.
         */
        __bfq_bfqq_recalc_budget(bfqd, bfqq, reason);
-       ref = bfqq->ref;
-       __bfq_bfqq_expire(bfqd, bfqq);
-
-       if (ref == 1) /* bfqq is gone, no more actions on it */
+       if (__bfq_bfqq_expire(bfqd, bfqq))
+               /* bfqq is gone, no more actions on it */
                return;
 
        bfqq->injected_service = 0;
index 062e1c4787f4a9e66ac4df54d24c17d9f92c6577..86394e503ca9c0487a66d40deaa09643148ae3df 100644 (file)
@@ -995,7 +995,7 @@ bool __bfq_deactivate_entity(struct bfq_entity *entity,
                             bool ins_into_idle_tree);
 bool next_queue_may_preempt(struct bfq_data *bfqd);
 struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd);
-void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd);
+bool __bfq_bfqd_reset_in_service(struct bfq_data *bfqd);
 void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
                         bool ins_into_idle_tree, bool expiration);
 void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq);
index 63311d1ff1edf41823ef2790ac3175535d2f2ef5..ae4d000ac0af1c38a49c28e824ca843bfb3c531d 100644 (file)
@@ -1012,7 +1012,7 @@ static void __bfq_activate_entity(struct bfq_entity *entity,
                entity->on_st = true;
        }
 
-#ifdef BFQ_GROUP_IOSCHED_ENABLED
+#ifdef CONFIG_BFQ_GROUP_IOSCHED
        if (!bfq_entity_to_bfqq(entity)) { /* bfq_group */
                struct bfq_group *bfqg =
                        container_of(entity, struct bfq_group, entity);
@@ -1605,7 +1605,8 @@ struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
        return bfqq;
 }
 
-void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
+/* returns true if the in-service queue gets freed */
+bool __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
 {
        struct bfq_queue *in_serv_bfqq = bfqd->in_service_queue;
        struct bfq_entity *in_serv_entity = &in_serv_bfqq->entity;
@@ -1629,8 +1630,20 @@ void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
         * service tree either, then release the service reference to
         * the queue it represents (taken with bfq_get_entity).
         */
-       if (!in_serv_entity->on_st)
+       if (!in_serv_entity->on_st) {
+               /*
+                * If no process is referencing in_serv_bfqq any
+                * longer, then the service reference may be the only
+                * reference to the queue. If this is the case, then
+                * bfqq gets freed here.
+                */
+               int ref = in_serv_bfqq->ref;
                bfq_put_queue(in_serv_bfqq);
+               if (ref == 1)
+                       return true;
+       }
+
+       return false;
 }
 
 void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
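
The bfq hunks above adopt a "did the final put free it?" protocol: sample the refcount before the put and return true so callers stop touching a possibly-freed queue. The same idea reduced to a single-threaded model; real bfq queues carry far more state.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct queue {
        int ref;
};

static void put_queue(struct queue *q)
{
        if (--q->ref == 0)
                free(q);
}

/* Returns true if our put dropped the last reference. */
static bool reset_in_service(struct queue *q)
{
        int ref = q->ref;       /* sample before q may disappear */

        put_queue(q);
        return ref == 1;        /* q must not be dereferenced past here */
}

int main(void)
{
        struct queue *q = malloc(sizeof(*q));

        q->ref = 1;
        printf("freed: %d\n", reset_in_service(q));     /* freed: 1 */
        return 0;
}
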
index b64cedc7f87cf1cf5f24bf4c50c808ae6a59f210..716510ecd7ffa3f0535b49e3a443ea1ebe38ec12 100644 (file)
@@ -1298,8 +1298,11 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
                        }
                }
 
-               if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
+               if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) {
+                       if (!map_data)
+                               __free_page(page);
                        break;
+               }
 
                len -= bytes;
                offset = 0;
index 4673ebe4225534dc9965089ba76ad127963dcb0f..a55389ba877964e5ad69f173ed0a3fa001bb9936 100644 (file)
@@ -1245,8 +1245,6 @@ static int blk_cloned_rq_check_limits(struct request_queue *q,
  */
 blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 {
-       blk_qc_t unused;
-
        if (blk_cloned_rq_check_limits(q, rq))
                return BLK_STS_IOERR;
 
@@ -1262,7 +1260,7 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *
         * bypass a potential scheduler on the bottom device for
         * insert.
         */
-       return blk_mq_try_issue_directly(rq->mq_hctx, rq, &unused, true, true);
+       return blk_mq_request_issue_directly(rq, true);
 }
 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
 
index 6e0f2d97fc6d8f0a5b14e6dbea23f817706bef7a..d95f9489201526081abf8b8bdcc65a525cf4c179 100644 (file)
@@ -220,7 +220,7 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
                blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
                flush_rq->tag = -1;
        } else {
-               blk_mq_put_driver_tag_hctx(hctx, flush_rq);
+               blk_mq_put_driver_tag(flush_rq);
                flush_rq->internal_tag = -1;
        }
 
@@ -324,7 +324,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
 
        if (q->elevator) {
                WARN_ON(rq->tag < 0);
-               blk_mq_put_driver_tag_hctx(hctx, rq);
+               blk_mq_put_driver_tag(rq);
        }
 
        /*
index 40905539afed347ebb7882d7e02c824c096e16ce..aa6bc5c0264388a549956c3f8acb57c1d144fb5f 100644 (file)
@@ -423,10 +423,12 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
                 * busy in case of 'none' scheduler, and this way may save
                 * us one extra enqueue & dequeue to sw queue.
                 */
-               if (!hctx->dispatch_busy && !e && !run_queue_async)
+               if (!hctx->dispatch_busy && !e && !run_queue_async) {
                        blk_mq_try_issue_list_directly(hctx, list);
-               else
-                       blk_mq_insert_requests(hctx, ctx, list);
+                       if (list_empty(list))
+                               return;
+               }
+               blk_mq_insert_requests(hctx, ctx, list);
        }
 
        blk_mq_run_hw_queue(hctx, run_queue_async);
index 70b210a308c452b43abd1a270e759f5e331ab55f..9516304a38ee37c70f897185e6bfe94fc7fe79f8 100644 (file)
@@ -59,7 +59,8 @@ static int blk_mq_poll_stats_bkt(const struct request *rq)
 }
 
 /*
- * Check if any of the ctx's have pending work in this hardware queue
+ * Check if any of the ctxs, the dispatch list or the elevator
+ * has pending work in this hardware queue.
  */
 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
 {
@@ -653,6 +654,13 @@ bool blk_mq_complete_request(struct request *rq)
 }
 EXPORT_SYMBOL(blk_mq_complete_request);
 
+void blk_mq_complete_request_sync(struct request *rq)
+{
+       WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
+       rq->q->mq_ops->complete(rq);
+}
+EXPORT_SYMBOL_GPL(blk_mq_complete_request_sync);
+
 int blk_mq_request_started(struct request *rq)
 {
        return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
@@ -1071,7 +1079,13 @@ static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
        hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
 
        spin_lock(&hctx->dispatch_wait_lock);
-       list_del_init(&wait->entry);
+       if (!list_empty(&wait->entry)) {
+               struct sbitmap_queue *sbq;
+
+               list_del_init(&wait->entry);
+               sbq = &hctx->tags->bitmap_tags;
+               atomic_dec(&sbq->ws_active);
+       }
        spin_unlock(&hctx->dispatch_wait_lock);
 
        blk_mq_run_hw_queue(hctx, true);
@@ -1087,6 +1101,7 @@ static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
 static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
                                 struct request *rq)
 {
+       struct sbitmap_queue *sbq = &hctx->tags->bitmap_tags;
        struct wait_queue_head *wq;
        wait_queue_entry_t *wait;
        bool ret;
@@ -1109,7 +1124,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
        if (!list_empty_careful(&wait->entry))
                return false;
 
-       wq = &bt_wait_ptr(&hctx->tags->bitmap_tags, hctx)->wait;
+       wq = &bt_wait_ptr(sbq, hctx)->wait;
 
        spin_lock_irq(&wq->lock);
        spin_lock(&hctx->dispatch_wait_lock);
@@ -1119,6 +1134,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
                return false;
        }
 
+       atomic_inc(&sbq->ws_active);
        wait->flags &= ~WQ_FLAG_EXCLUSIVE;
        __add_wait_queue(wq, wait);
 
@@ -1139,6 +1155,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
         * someone else gets the wakeup.
         */
        list_del_init(&wait->entry);
+       atomic_dec(&sbq->ws_active);
        spin_unlock(&hctx->dispatch_wait_lock);
        spin_unlock_irq(&wq->lock);
 
@@ -1701,11 +1718,12 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
        unsigned int depth;
 
        list_splice_init(&plug->mq_list, &list);
-       plug->rq_count = 0;
 
        if (plug->rq_count > 2 && plug->multiple_queues)
                list_sort(NULL, &list, plug_rq_cmp);
 
+       plug->rq_count = 0;
+
        this_q = NULL;
        this_hctx = NULL;
        this_ctx = NULL;
@@ -1790,74 +1808,76 @@ static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
        return ret;
 }
 
-blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
                                                struct request *rq,
                                                blk_qc_t *cookie,
-                                               bool bypass, bool last)
+                                               bool bypass_insert, bool last)
 {
        struct request_queue *q = rq->q;
        bool run_queue = true;
-       blk_status_t ret = BLK_STS_RESOURCE;
-       int srcu_idx;
-       bool force = false;
 
-       hctx_lock(hctx, &srcu_idx);
        /*
-        * hctx_lock is needed before checking quiesced flag.
+        * RCU or SRCU read lock is needed before checking quiesced flag.
         *
-        * When queue is stopped or quiesced, ignore 'bypass', insert
-        * and return BLK_STS_OK to caller, and avoid driver to try to
-        * dispatch again.
+        * When queue is stopped or quiesced, ignore 'bypass_insert' from
+        * blk_mq_request_issue_directly(), and return BLK_STS_OK to the caller
+        * so that the driver does not try to dispatch again.
         */
-       if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q))) {
+       if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
                run_queue = false;
-               bypass = false;
-               goto out_unlock;
+               bypass_insert = false;
+               goto insert;
        }
 
-       if (unlikely(q->elevator && !bypass))
-               goto out_unlock;
+       if (q->elevator && !bypass_insert)
+               goto insert;
 
        if (!blk_mq_get_dispatch_budget(hctx))
-               goto out_unlock;
+               goto insert;
 
        if (!blk_mq_get_driver_tag(rq)) {
                blk_mq_put_dispatch_budget(hctx);
-               goto out_unlock;
+               goto insert;
        }
 
-       /*
-        * Always add a request that has been through
-        * .queue_rq() to the hardware dispatch list.
-        */
-       force = true;
-       ret = __blk_mq_issue_directly(hctx, rq, cookie, last);
-out_unlock:
+       return __blk_mq_issue_directly(hctx, rq, cookie, last);
+insert:
+       if (bypass_insert)
+               return BLK_STS_RESOURCE;
+
+       blk_mq_request_bypass_insert(rq, run_queue);
+       return BLK_STS_OK;
+}
+
+static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+               struct request *rq, blk_qc_t *cookie)
+{
+       blk_status_t ret;
+       int srcu_idx;
+
+       might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
+
+       hctx_lock(hctx, &srcu_idx);
+
+       ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true);
+       if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
+               blk_mq_request_bypass_insert(rq, true);
+       else if (ret != BLK_STS_OK)
+               blk_mq_end_request(rq, ret);
+
+       hctx_unlock(hctx, srcu_idx);
+}
+
+blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
+{
+       blk_status_t ret;
+       int srcu_idx;
+       blk_qc_t unused_cookie;
+       struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
+
+       hctx_lock(hctx, &srcu_idx);
+       ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last);
        hctx_unlock(hctx, srcu_idx);
-       switch (ret) {
-       case BLK_STS_OK:
-               break;
-       case BLK_STS_DEV_RESOURCE:
-       case BLK_STS_RESOURCE:
-               if (force) {
-                       blk_mq_request_bypass_insert(rq, run_queue);
-                       /*
-                        * We have to return BLK_STS_OK for the DM
-                        * to avoid livelock. Otherwise, we return
-                        * the real result to indicate whether the
-                        * request is direct-issued successfully.
-                        */
-                       ret = bypass ? BLK_STS_OK : ret;
-               } else if (!bypass) {
-                       blk_mq_sched_insert_request(rq, false,
-                                                   run_queue, false);
-               }
-               break;
-       default:
-               if (!bypass)
-                       blk_mq_end_request(rq, ret);
-               break;
-       }
 
        return ret;
 }
@@ -1865,20 +1885,22 @@ blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
                struct list_head *list)
 {
-       blk_qc_t unused;
-       blk_status_t ret = BLK_STS_OK;
-
        while (!list_empty(list)) {
+               blk_status_t ret;
                struct request *rq = list_first_entry(list, struct request,
                                queuelist);
 
                list_del_init(&rq->queuelist);
-               if (ret == BLK_STS_OK)
-                       ret = blk_mq_try_issue_directly(hctx, rq, &unused,
-                                                       false,
+               ret = blk_mq_request_issue_directly(rq, list_empty(list));
+               if (ret != BLK_STS_OK) {
+                       if (ret == BLK_STS_RESOURCE ||
+                                       ret == BLK_STS_DEV_RESOURCE) {
+                               blk_mq_request_bypass_insert(rq,
                                                        list_empty(list));
-               else
-                       blk_mq_sched_insert_request(rq, false, true, false);
+                               break;
+                       }
+                       blk_mq_end_request(rq, ret);
+               }
        }
 
        /*
@@ -1886,7 +1908,7 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
         * the driver there was more coming, but that turned out to
         * be a lie.
         */
-       if (ret != BLK_STS_OK && hctx->queue->mq_ops->commit_rqs)
+       if (!list_empty(list) && hctx->queue->mq_ops->commit_rqs)
                hctx->queue->mq_ops->commit_rqs(hctx);
 }
 
@@ -1993,19 +2015,21 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
                        plug->rq_count--;
                }
                blk_add_rq_to_plug(plug, rq);
+               trace_block_plug(q);
 
                blk_mq_put_ctx(data.ctx);
 
                if (same_queue_rq) {
                        data.hctx = same_queue_rq->mq_hctx;
+                       trace_block_unplug(q, 1, true);
                        blk_mq_try_issue_directly(data.hctx, same_queue_rq,
-                                       &cookie, false, true);
+                                       &cookie);
                }
        } else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator &&
                        !data.hctx->dispatch_busy)) {
                blk_mq_put_ctx(data.ctx);
                blk_mq_bio_to_request(rq, bio);
-               blk_mq_try_issue_directly(data.hctx, rq, &cookie, false, true);
+               blk_mq_try_issue_directly(data.hctx, rq, &cookie);
        } else {
                blk_mq_put_ctx(data.ctx);
                blk_mq_bio_to_request(rq, bio);
@@ -2322,7 +2346,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
        return 0;
 
  free_fq:
-       kfree(hctx->fq);
+       blk_free_flush_queue(hctx->fq);
  exit_hctx:
        if (set->ops->exit_hctx)
                set->ops->exit_hctx(hctx, hctx_idx);
index 0ed8e5a8729fccd39d5da0e58854bdb5c5de42da..423ea88ab6fbaac08b4fe1367e6a778fdb70641a 100644 (file)
@@ -70,10 +70,8 @@ void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
                                struct list_head *list);
 
-blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
-                                               struct request *rq,
-                                               blk_qc_t *cookie,
-                                               bool bypass, bool last);
+/* Used by blk_insert_cloned_request() to issue a request directly */
+blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last);
 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
                                    struct list_head *list);
 
@@ -224,15 +222,6 @@ static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
        }
 }
 
-static inline void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
-                                      struct request *rq)
-{
-       if (rq->tag == -1 || rq->internal_tag == -1)
-               return;
-
-       __blk_mq_put_driver_tag(hctx, rq);
-}
-
 static inline void blk_mq_put_driver_tag(struct request *rq)
 {
        if (rq->tag == -1 || rq->internal_tag == -1)
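
Taken together, the blk-mq hunks split the old flag-driven blk_mq_try_issue_directly() into one internal attempt plus two wrappers with distinct fallback policies: the bio/plug path never reports failure upward and bypass-inserts instead, while blk_mq_request_issue_directly() hands the raw status back to its caller (device-mapper). A sketch of that split; the status names and busy flag are illustrative.

#include <stdio.h>

enum status { STS_OK, STS_RESOURCE, STS_ERROR };

/* The internal attempt: reports what happened, decides nothing. */
static enum status try_issue(int queue_busy)
{
        return queue_busy ? STS_RESOURCE : STS_OK;
}

/* bio/plug path: never report failure upward; fall back locally. */
static void issue_from_plug(int busy)
{
        enum status s = try_issue(busy);

        if (s == STS_RESOURCE)
                puts("bypass-insert to dispatch list");
        else if (s != STS_OK)
                puts("end request with error");
}

/* cloned-request path: hand the raw status back to the caller. */
static enum status issue_cloned(int busy)
{
        return try_issue(busy);
}

int main(void)
{
        issue_from_plug(1);
        printf("cloned status: %d\n", issue_cloned(1));
        return 0;
}
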
index 62d3aa74277b4d03cb4bd1e7d5cee705864bd41b..5e9d7348c16f784f93ea117d537dbfbfe454a783 100644 (file)
@@ -81,8 +81,12 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
 
        ACPI_FUNCTION_TRACE(ev_enable_gpe);
 
-       /* Enable the requested GPE */
+       /* Clear the GPE status */
+       status = acpi_hw_clear_gpe(gpe_event_info);
+       if (ACPI_FAILURE(status))
+               return_ACPI_STATUS(status);
 
+       /* Enable the requested GPE */
        status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
        return_ACPI_STATUS(status);
 }
index 8638f43cfc3d87184c9a0cc91318f07ab5abff8a..79d86da1c8924a971bacf928008634c8aa3224ca 100644 (file)
@@ -186,6 +186,10 @@ void acpi_ns_detach_object(struct acpi_namespace_node *node)
                }
        }
 
+       if (obj_desc->common.type == ACPI_TYPE_REGION) {
+               acpi_ut_remove_address_range(obj_desc->region.space_id, node);
+       }
+
        /* Clear the Node entry in all cases */
 
        node->object = NULL;
index 6ecbbabf12330c316d3e28cdbef52e72548b6ef3..eec263c9019e4bd9d200cf6903857f620da962fb 100644 (file)
@@ -1043,9 +1043,6 @@ void __init acpi_early_init(void)
 
        acpi_permanent_mmap = true;
 
-       /* Initialize debug output. Linux does not use ACPICA defaults */
-       acpi_dbg_level = ACPI_LV_INFO | ACPI_LV_REPAIR;
-
 #ifdef CONFIG_X86
        /*
         * If the machine falls into the DMI check table,
index 1b207fca1420bbc5c40469e1b3558cb38a57c450..d4244e7d0e38f05cef4e5b97df55d3a9946837c1 100644 (file)
@@ -1150,8 +1150,13 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
        cpc_read(cpunum, nominal_reg, &nom);
        perf_caps->nominal_perf = nom;
 
-       cpc_read(cpunum, guaranteed_reg, &guaranteed);
-       perf_caps->guaranteed_perf = guaranteed;
+       if (guaranteed_reg->type != ACPI_TYPE_BUFFER  ||
+           IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) {
+               perf_caps->guaranteed_perf = 0;
+       } else {
+               cpc_read(cpunum, guaranteed_reg, &guaranteed);
+               perf_caps->guaranteed_perf = guaranteed;
+       }
 
        cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
        perf_caps->lowest_nonlinear_perf = min_nonlinear;
index 5a389a4f4f652edda26c109baf5e595bf6325903..f1ed0befe303d241c4537e446daad3726e62dbb4 100644 (file)
@@ -567,6 +567,12 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
                goto out;
        }
 
+       dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name,
+                       cmd_name, out_obj->buffer.length);
+       print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4,
+                       out_obj->buffer.pointer,
+                       min_t(u32, 128, out_obj->buffer.length), true);
+
        if (call_pkg) {
                call_pkg->nd_fw_size = out_obj->buffer.length;
                memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
@@ -585,12 +591,6 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
                return 0;
        }
 
-       dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name,
-                       cmd_name, out_obj->buffer.length);
-       print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4,
-                       out_obj->buffer.pointer,
-                       min_t(u32, 128, out_obj->buffer.length), true);
-
        for (i = 0, offset = 0; i < desc->out_num; i++) {
                u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
                                (u32 *) out_obj->buffer.pointer,
index f70de71f79d6a699442f430dfa6606ad18a8f2dc..cddd0fcf622c3314f7115e86124e9dde5a5f98ff 100644 (file)
@@ -122,9 +122,8 @@ static int intel_security_change_key(struct nvdimm *nvdimm,
        if (!test_bit(cmd, &nfit_mem->dsm_mask))
                return -ENOTTY;
 
-       if (old_data)
-               memcpy(nd_cmd.cmd.old_pass, old_data->data,
-                               sizeof(nd_cmd.cmd.old_pass));
+       memcpy(nd_cmd.cmd.old_pass, old_data->data,
+                       sizeof(nd_cmd.cmd.old_pass));
        memcpy(nd_cmd.cmd.new_pass, new_data->data,
                        sizeof(nd_cmd.cmd.new_pass));
        rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
@@ -336,9 +335,8 @@ static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm,
 
        /* flush all cache before we erase DIMM */
        nvdimm_invalidate_cache();
-       if (nkey)
-               memcpy(nd_cmd.cmd.passphrase, nkey->data,
-                               sizeof(nd_cmd.cmd.passphrase));
+       memcpy(nd_cmd.cmd.passphrase, nkey->data,
+                       sizeof(nd_cmd.cmd.passphrase));
        rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
        if (rc < 0)
                return rc;
index 8685882da64cdaf60dcbac09d9c61735905b5300..4b9c7ca492e6db85dad979a67c7baed7cedd972d 100644 (file)
@@ -2057,7 +2057,8 @@ static size_t binder_get_object(struct binder_proc *proc,
        size_t object_size = 0;
 
        read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
-       if (read_size < sizeof(*hdr) || !IS_ALIGNED(offset, sizeof(u32)))
+       if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
+           !IS_ALIGNED(offset, sizeof(u32)))
                return 0;
        binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
                                      offset, read_size);
index 6389467670a0bc171522a2035ae4788bb700d616..195f120c4e8c9aefa9f6e57e8ce400a8ddde95fb 100644 (file)
@@ -927,14 +927,13 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 
        index = page - alloc->pages;
        page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
+
+       mm = alloc->vma_vm_mm;
+       if (!mmget_not_zero(mm))
+               goto err_mmget;
+       if (!down_write_trylock(&mm->mmap_sem))
+               goto err_down_write_mmap_sem_failed;
        vma = binder_alloc_get_vma(alloc);
-       if (vma) {
-               if (!mmget_not_zero(alloc->vma_vm_mm))
-                       goto err_mmget;
-               mm = alloc->vma_vm_mm;
-               if (!down_read_trylock(&mm->mmap_sem))
-                       goto err_down_write_mmap_sem_failed;
-       }
 
        list_lru_isolate(lru, item);
        spin_unlock(lock);
@@ -945,10 +944,9 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
                zap_page_range(vma, page_addr, PAGE_SIZE);
 
                trace_binder_unmap_user_end(alloc, index);
-
-               up_read(&mm->mmap_sem);
-               mmput(mm);
        }
+       up_write(&mm->mmap_sem);
+       mmput(mm);
 
        trace_binder_unmap_kernel_start(alloc, index);
 
index b3ed8f9953a862ea3ae67ef065ca5469330a44e0..173e6f2dd9af0f12afdc1fee7e372cfa4291e0aa 100644 (file)
@@ -52,38 +52,52 @@ static int eject_tray(struct ata_device *dev)
 /* Per the spec, only slot type and drawer type ODD can be supported */
 static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
 {
-       char buf[16];
+       char *buf;
        unsigned int ret;
-       struct rm_feature_desc *desc = (void *)(buf + 8);
+       struct rm_feature_desc *desc;
        struct ata_taskfile tf;
        static const char cdb[] = {  GPCMD_GET_CONFIGURATION,
                        2,      /* only 1 feature descriptor requested */
                        0, 3,   /* 3, removable medium feature */
                        0, 0, 0,/* reserved */
-                       0, sizeof(buf),
+                       0, 16,
                        0, 0, 0,
        };
 
+       buf = kzalloc(16, GFP_KERNEL);
+       if (!buf)
+               return ODD_MECH_TYPE_UNSUPPORTED;
+       desc = (void *)(buf + 8);
+
        ata_tf_init(dev, &tf);
        tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
        tf.command = ATA_CMD_PACKET;
        tf.protocol = ATAPI_PROT_PIO;
-       tf.lbam = sizeof(buf);
+       tf.lbam = 16;
 
        ret = ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
-                               buf, sizeof(buf), 0);
-       if (ret)
+                               buf, 16, 0);
+       if (ret) {
+               kfree(buf);
                return ODD_MECH_TYPE_UNSUPPORTED;
+       }
 
-       if (be16_to_cpu(desc->feature_code) != 3)
+       if (be16_to_cpu(desc->feature_code) != 3) {
+               kfree(buf);
                return ODD_MECH_TYPE_UNSUPPORTED;
+       }
 
-       if (desc->mech_type == 0 && desc->load == 0 && desc->eject == 1)
+       if (desc->mech_type == 0 && desc->load == 0 && desc->eject == 1) {
+               kfree(buf);
                return ODD_MECH_TYPE_SLOT;
-       else if (desc->mech_type == 1 && desc->load == 0 && desc->eject == 1)
+       } else if (desc->mech_type == 1 && desc->load == 0 &&
+                  desc->eject == 1) {
+               kfree(buf);
                return ODD_MECH_TYPE_DRAWER;
-       else
+       } else {
+               kfree(buf);
                return ODD_MECH_TYPE_UNSUPPORTED;
+       }
 }
 
 /* Test if ODD is zero power ready by sense code */
index 417a9f15c11631cae518a9924c0e480fd6b85fd2..d7ac09c092f2ac8a5caf8632dca569b38b9472a6 100644 (file)
@@ -1748,6 +1748,11 @@ static int __init null_init(void)
                return -EINVAL;
        }
 
+       if (g_home_node != NUMA_NO_NODE && g_home_node >= nr_online_nodes) {
+               pr_err("null_blk: invalid home_node value\n");
+               g_home_node = NUMA_NO_NODE;
+       }
+
        if (g_queue_mode == NULL_Q_RQ) {
                pr_err("null_blk: legacy IO path no longer available\n");
                return -EINVAL;
index 377a694dc22814b9d040a64a9d3ffd7666f5a6a4..6d415b20fb70651c10aa87af6b7f18e53f0aaddd 100644 (file)
@@ -314,6 +314,7 @@ static void pcd_init_units(void)
                disk->queue = blk_mq_init_sq_queue(&cd->tag_set, &pcd_mq_ops,
                                                   1, BLK_MQ_F_SHOULD_MERGE);
                if (IS_ERR(disk->queue)) {
+                       put_disk(disk);
                        disk->queue = NULL;
                        continue;
                }
@@ -750,6 +751,8 @@ static int pcd_detect(void)
 
        printk("%s: No CD-ROM drive found\n", name);
        for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
+               if (!cd->disk)
+                       continue;
                blk_cleanup_queue(cd->disk->queue);
                cd->disk->queue = NULL;
                blk_mq_free_tag_set(&cd->tag_set);
@@ -1010,8 +1013,14 @@ static int __init pcd_init(void)
        pcd_probe_capabilities();
 
        if (register_blkdev(major, name)) {
-               for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++)
+               for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
+                       if (!cd->disk)
+                               continue;
+
+                       blk_cleanup_queue(cd->disk->queue);
+                       blk_mq_free_tag_set(&cd->tag_set);
                        put_disk(cd->disk);
+               }
                return -EBUSY;
        }
 
@@ -1032,6 +1041,9 @@ static void __exit pcd_exit(void)
        int unit;
 
        for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
+               if (!cd->disk)
+                       continue;
+
                if (cd->present) {
                        del_gendisk(cd->disk);
                        pi_release(cd->pi);
index 103b617cdc3184c0a381e569fbb8d8c81894585c..35e6e271b219ccbe524e60499f78ff65fcb67994 100644 (file)
@@ -762,6 +762,8 @@ static int pf_detect(void)
 
        printk("%s: No ATAPI disk detected\n", name);
        for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
+               if (!pf->disk)
+                       continue;
                blk_cleanup_queue(pf->disk->queue);
                pf->disk->queue = NULL;
                blk_mq_free_tag_set(&pf->tag_set);
@@ -1029,8 +1031,13 @@ static int __init pf_init(void)
        pf_busy = 0;
 
        if (register_blkdev(major, name)) {
-               for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++)
+               for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
+                       if (!pf->disk)
+                               continue;
+                       blk_cleanup_queue(pf->disk->queue);
+                       blk_mq_free_tag_set(&pf->tag_set);
                        put_disk(pf->disk);
+               }
                return -EBUSY;
        }
 
@@ -1051,6 +1058,9 @@ static void __exit pf_exit(void)
        int unit;
        unregister_blkdev(major, name);
        for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
+               if (!pf->disk)
+                       continue;
+
                if (pf->present)
                        del_gendisk(pf->disk);
 
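Both pcd and pf probe their units individually, so a unit whose gendisk allocation failed is left with disk == NULL; every cleanup loop (detect failure, register_blkdev() failure, module exit) must skip such holes before touching the queue or tag set. The recurring shape, as applied in the hunks above:

        for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
                if (!cd->disk)          /* this unit never got a gendisk */
                        continue;
                blk_cleanup_queue(cd->disk->queue);
                blk_mq_free_tag_set(&cd->tag_set);
                put_disk(cd->disk);
        }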
index 4bc083b7c9b541a0fede52156bdae003c1d678df..2a7ca4a1e6f7bd5e2730b13a30760f9506c6973c 100644 (file)
@@ -513,6 +513,8 @@ static int init_vq(struct virtio_blk *vblk)
        if (err)
                num_vqs = 1;
 
+       num_vqs = min_t(unsigned int, nr_cpu_ids, num_vqs);
+
        vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
        if (!vblk->vqs)
                return -ENOMEM;
index 87ccef4bd69e904b1f19403e82403bd5cd13a277..32a21b8d1d85f430a031d342bff3a646c5ffa427 100644 (file)
@@ -1090,6 +1090,8 @@ static int ace_setup(struct ace_device *ace)
        return 0;
 
 err_read:
+       /* prevent double queue cleanup */
+       ace->gd->queue = NULL;
        put_disk(ace->gd);
 err_alloc_disk:
        blk_cleanup_queue(ace->queue);
index e7a5f1d1c3141acf9ef79c79fc5829886ef73245..399cad7daae77b37508033ec1cac61bebefbc550 100644 (file)
@@ -290,18 +290,8 @@ static ssize_t idle_store(struct device *dev,
        struct zram *zram = dev_to_zram(dev);
        unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
        int index;
-       char mode_buf[8];
-       ssize_t sz;
 
-       sz = strscpy(mode_buf, buf, sizeof(mode_buf));
-       if (sz <= 0)
-               return -EINVAL;
-
-       /* ignore trailing new line */
-       if (mode_buf[sz - 1] == '\n')
-               mode_buf[sz - 1] = 0x00;
-
-       if (strcmp(mode_buf, "all"))
+       if (!sysfs_streq(buf, "all"))
                return -EINVAL;
 
        down_read(&zram->init_lock);
@@ -635,25 +625,15 @@ static ssize_t writeback_store(struct device *dev,
        struct bio bio;
        struct bio_vec bio_vec;
        struct page *page;
-       ssize_t ret, sz;
-       char mode_buf[8];
-       int mode = -1;
+       ssize_t ret;
+       int mode;
        unsigned long blk_idx = 0;
 
-       sz = strscpy(mode_buf, buf, sizeof(mode_buf));
-       if (sz <= 0)
-               return -EINVAL;
-
-       /* ignore trailing newline */
-       if (mode_buf[sz - 1] == '\n')
-               mode_buf[sz - 1] = 0x00;
-
-       if (!strcmp(mode_buf, "idle"))
+       if (sysfs_streq(buf, "idle"))
                mode = IDLE_WRITEBACK;
-       else if (!strcmp(mode_buf, "huge"))
+       else if (sysfs_streq(buf, "huge"))
                mode = HUGE_WRITEBACK;
-
-       if (mode == -1)
+       else
                return -EINVAL;
 
        down_read(&zram->init_lock);
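Both store handlers above drop a hand-rolled strscpy-plus-newline-trim in favour of sysfs_streq(), which compares two strings while treating a single trailing newline as equivalent to the terminating NUL, which is exactly what `echo idle > writeback` produces:

        sysfs_streq("idle",   "idle");          /* true */
        sysfs_streq("idle\n", "idle");          /* true, trailing \n ignored */
        sysfs_streq("idler",  "idle");          /* false */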
index ded198328f216066959825950ebfbe5aef29027d..7db48ae65cd2dc946b6a1757baa20993158b903e 100644 (file)
@@ -2942,6 +2942,7 @@ static int btusb_config_oob_wake(struct hci_dev *hdev)
                return 0;
        }
 
+       irq_set_status_flags(irq, IRQ_NOAUTOEN);
        ret = devm_request_irq(&hdev->dev, irq, btusb_oob_wake_handler,
                               0, "OOB Wake-on-BT", data);
        if (ret) {
@@ -2956,7 +2957,6 @@ static int btusb_config_oob_wake(struct hci_dev *hdev)
        }
 
        data->oob_wake_irq = irq;
-       disable_irq(irq);
        bt_dev_info(hdev, "OOB Wake-on-BT configured at IRQ %u", irq);
        return 0;
 }
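Requesting an interrupt normally unmasks it immediately, so the old request-then-disable_irq() order left a window in which the wake handler could fire before setup completed. Setting IRQ_NOAUTOEN beforehand closes that window; a minimal sketch of the pattern, with placeholder names, where the line is unmasked only later when it is actually armed:

        /* keep the line masked across devm_request_irq() */
        irq_set_status_flags(irq, IRQ_NOAUTOEN);
        ret = devm_request_irq(dev, irq, wake_handler, 0, "wake", data);
        if (ret)
                return ret;
        ...
        enable_irq(irq);        /* unmask only once we are ready for it */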
index 72866a004f075b79257c9d2df0c7b5b60852c31e..466ebd84ad1774096ecc45dd9f4ebe13ac785602 100644 (file)
@@ -348,7 +348,7 @@ config XILINX_HWICAP
 
 config R3964
        tristate "Siemens R3964 line discipline"
-       depends on TTY
+       depends on TTY && BROKEN
        ---help---
          This driver allows synchronous communication with devices using the
          Siemens R3964 packet protocol. Unless you are dealing with special
index ff0b199be4729757743bbd72bff2fc61a842d291..f2411468f33ff44707e45ab34cd359d2c3b5a0f0 100644 (file)
@@ -66,7 +66,6 @@ static void __init dmi_add_platform_ipmi(unsigned long base_addr,
                return;
        }
 
-       memset(&p, 0, sizeof(p));
        p.addr = base_addr;
        p.space = space;
        p.regspacing = offset;
index e8ba678347466db181a08768158e56930090aa7b..00bf4b17edbfafb5c9d25cb524f35e8d59c7a074 100644 (file)
@@ -214,6 +214,9 @@ struct ipmi_user {
 
        /* Does this interface receive IPMI events? */
        bool gets_events;
+
+       /* Free must run in process context for RCU cleanup. */
+       struct work_struct remove_work;
 };
 
 static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
@@ -1157,6 +1160,15 @@ static int intf_err_seq(struct ipmi_smi *intf,
        return rv;
 }
 
+static void free_user_work(struct work_struct *work)
+{
+       struct ipmi_user *user = container_of(work, struct ipmi_user,
+                                             remove_work);
+
+       cleanup_srcu_struct(&user->release_barrier);
+       kfree(user);
+}
+
 int ipmi_create_user(unsigned int          if_num,
                     const struct ipmi_user_hndl *handler,
                     void                  *handler_data,
@@ -1200,6 +1212,8 @@ int ipmi_create_user(unsigned int          if_num,
        goto out_kfree;
 
  found:
+       INIT_WORK(&new_user->remove_work, free_user_work);
+
        rv = init_srcu_struct(&new_user->release_barrier);
        if (rv)
                goto out_kfree;
@@ -1260,8 +1274,9 @@ EXPORT_SYMBOL(ipmi_get_smi_info);
 static void free_user(struct kref *ref)
 {
        struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
-       cleanup_srcu_struct(&user->release_barrier);
-       kfree(user);
+
+       /* SRCU cleanup must happen in task context. */
+       schedule_work(&user->remove_work);
 }
 
 static void _ipmi_destroy_user(struct ipmi_user *user)
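kref release callbacks can run in atomic context, while cleanup_srcu_struct() may sleep, so the fix defers the teardown to a workqueue. The generic shape of the pattern, sketched with placeholder names (INIT_WORK() is assumed to have run at creation time, as in the hunk above):

        struct obj {
                struct kref refcount;
                struct srcu_struct srcu;
                struct work_struct free_work;
        };

        static void obj_free_work(struct work_struct *work)
        {
                struct obj *obj = container_of(work, struct obj, free_work);

                cleanup_srcu_struct(&obj->srcu);        /* may sleep */
                kfree(obj);
        }

        static void obj_release(struct kref *ref)       /* possibly atomic */
        {
                struct obj *obj = container_of(ref, struct obj, refcount);

                schedule_work(&obj->free_work);         /* safe anywhere */
        }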
index 01946cad3d1381ba7eed020544c775ed9a6e3f5f..682221eebd66101cb67b04e2ac979ab7caeae51c 100644 (file)
@@ -118,6 +118,8 @@ void __init ipmi_hardcode_init(void)
        char *str;
        char *si_type[SI_MAX_PARMS];
 
+       memset(si_type, 0, sizeof(si_type));
+
        /* Parse out the si_type string into its components. */
        str = si_type_str;
        if (*str != '\0') {
index d8b77133a83a2a2c59d3d7873db8d4d110f5dfb0..f824563fc28dd091303f89de651589ea5350a64b 100644 (file)
@@ -37,8 +37,8 @@
  *
  * Returns size of the event. If it is an invalid event, returns 0.
  */
-static int calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
-                               struct tcg_pcr_event *event_header)
+static size_t calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
+                                  struct tcg_pcr_event *event_header)
 {
        struct tcg_efi_specid_event_head *efispecid;
        struct tcg_event_field *event_field;
index 8856cce5a23b2858b58b69373f4cd89e2f898abb..817ae09a369ec2ba192a68f302205eaef7aadeb5 100644 (file)
@@ -233,12 +233,19 @@ __poll_t tpm_common_poll(struct file *file, poll_table *wait)
        __poll_t mask = 0;
 
        poll_wait(file, &priv->async_wait, wait);
+       mutex_lock(&priv->buffer_mutex);
 
-       if (!priv->response_read || priv->response_length)
+       /*
+        * The response_length indicates whether there is still a response
+        * (or part of it) to be consumed. Partial reads decrease it by
+        * the number of bytes read, and a write resets it to zero.
+        */
+       if (priv->response_length)
                mask = EPOLLIN | EPOLLRDNORM;
        else
                mask = EPOLLOUT | EPOLLWRNORM;
 
+       mutex_unlock(&priv->buffer_mutex);
        return mask;
 }
 
index 83ece5639f8639e7bb397ec4feeb121006073208..ae1030c9b086de511aa5c2bd0800fdcf6c2a1051 100644 (file)
@@ -402,15 +402,13 @@ int tpm_pm_suspend(struct device *dev)
        if (chip->flags & TPM_CHIP_FLAG_ALWAYS_POWERED)
                return 0;
 
-       if (chip->flags & TPM_CHIP_FLAG_TPM2) {
-               mutex_lock(&chip->tpm_mutex);
-               if (!tpm_chip_start(chip)) {
+       if (!tpm_chip_start(chip)) {
+               if (chip->flags & TPM_CHIP_FLAG_TPM2)
                        tpm2_shutdown(chip, TPM2_SU_STATE);
-                       tpm_chip_stop(chip);
-               }
-               mutex_unlock(&chip->tpm_mutex);
-       } else {
-               rc = tpm1_pm_suspend(chip, tpm_suspend_pcr);
+               else
+                       rc = tpm1_pm_suspend(chip, tpm_suspend_pcr);
+
+               tpm_chip_stop(chip);
        }
 
        return rc;
index 89d6f3736dbf605036e4eefb70efd2ef2ee4f386..f8edbb65eda3564cf99623c852fe474a93803123 100644 (file)
@@ -20,8 +20,7 @@
 #define PROG_ID_MAX            7
 
 #define PROG_STATUS_MASK(id)   (1 << ((id) + 8))
-#define PROG_PRES_MASK         0x7
-#define PROG_PRES(layout, pckr)        ((pckr >> layout->pres_shift) & PROG_PRES_MASK)
+#define PROG_PRES(layout, pckr)        ((pckr >> layout->pres_shift) & layout->pres_mask)
 #define PROG_MAX_RM9200_CSS    3
 
 struct clk_programmable {
@@ -37,20 +36,29 @@ static unsigned long clk_programmable_recalc_rate(struct clk_hw *hw,
                                                  unsigned long parent_rate)
 {
        struct clk_programmable *prog = to_clk_programmable(hw);
+       const struct clk_programmable_layout *layout = prog->layout;
        unsigned int pckr;
+       unsigned long rate;
 
        regmap_read(prog->regmap, AT91_PMC_PCKR(prog->id), &pckr);
 
-       return parent_rate >> PROG_PRES(prog->layout, pckr);
+       if (layout->is_pres_direct)
+               rate = parent_rate / (PROG_PRES(layout, pckr) + 1);
+       else
+               rate = parent_rate >> PROG_PRES(layout, pckr);
+
+       return rate;
 }
 
 static int clk_programmable_determine_rate(struct clk_hw *hw,
                                           struct clk_rate_request *req)
 {
+       struct clk_programmable *prog = to_clk_programmable(hw);
+       const struct clk_programmable_layout *layout = prog->layout;
        struct clk_hw *parent;
        long best_rate = -EINVAL;
        unsigned long parent_rate;
-       unsigned long tmp_rate;
+       unsigned long tmp_rate = 0;
        int shift;
        int i;
 
@@ -60,10 +68,18 @@ static int clk_programmable_determine_rate(struct clk_hw *hw,
                        continue;
 
                parent_rate = clk_hw_get_rate(parent);
-               for (shift = 0; shift < PROG_PRES_MASK; shift++) {
-                       tmp_rate = parent_rate >> shift;
-                       if (tmp_rate <= req->rate)
-                               break;
+               if (layout->is_pres_direct) {
+                       for (shift = 0; shift <= layout->pres_mask; shift++) {
+                               tmp_rate = parent_rate / (shift + 1);
+                               if (tmp_rate <= req->rate)
+                                       break;
+                       }
+               } else {
+                       for (shift = 0; shift < layout->pres_mask; shift++) {
+                               tmp_rate = parent_rate >> shift;
+                               if (tmp_rate <= req->rate)
+                                       break;
+                       }
                }
 
                if (tmp_rate > req->rate)
@@ -137,16 +153,23 @@ static int clk_programmable_set_rate(struct clk_hw *hw, unsigned long rate,
        if (!div)
                return -EINVAL;
 
-       shift = fls(div) - 1;
+       if (layout->is_pres_direct) {
+               shift = div - 1;
 
-       if (div != (1 << shift))
-               return -EINVAL;
+               if (shift > layout->pres_mask)
+                       return -EINVAL;
+       } else {
+               shift = fls(div) - 1;
 
-       if (shift >= PROG_PRES_MASK)
-               return -EINVAL;
+               if (div != (1 << shift))
+                       return -EINVAL;
+
+               if (shift >= layout->pres_mask)
+                       return -EINVAL;
+       }
 
        regmap_update_bits(prog->regmap, AT91_PMC_PCKR(prog->id),
-                          PROG_PRES_MASK << layout->pres_shift,
+                          layout->pres_mask << layout->pres_shift,
                           shift << layout->pres_shift);
 
        return 0;
@@ -202,19 +225,25 @@ at91_clk_register_programmable(struct regmap *regmap,
 }
 
 const struct clk_programmable_layout at91rm9200_programmable_layout = {
+       .pres_mask = 0x7,
        .pres_shift = 2,
        .css_mask = 0x3,
        .have_slck_mck = 0,
+       .is_pres_direct = 0,
 };
 
 const struct clk_programmable_layout at91sam9g45_programmable_layout = {
+       .pres_mask = 0x7,
        .pres_shift = 2,
        .css_mask = 0x3,
        .have_slck_mck = 1,
+       .is_pres_direct = 0,
 };
 
 const struct clk_programmable_layout at91sam9x5_programmable_layout = {
+       .pres_mask = 0x7,
        .pres_shift = 4,
        .css_mask = 0x7,
        .have_slck_mck = 0,
+       .is_pres_direct = 0,
 };
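The layout gains a per-SoC pres_mask and an is_pres_direct flag because sama5d2 (added below) encodes the prescaler as a direct divisor, rate = parent / (pres + 1), while the older layouts encode a power of two, rate = parent >> pres. A worked sketch with a hypothetical helper mirroring clk_programmable_recalc_rate():

        static unsigned long prog_rate(unsigned long parent, unsigned int pres,
                                       bool is_pres_direct)
        {
                return is_pres_direct ? parent / (pres + 1) : parent >> pres;
        }

        /* pres == 3 on a 96 MHz parent: */
        prog_rate(96000000, 3, true);   /* sama5d2:    96 MHz / 4  = 24 MHz */
        prog_rate(96000000, 3, false);  /* at91sam9x5: 96 MHz >> 3 = 12 MHz */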
index 672a79bda88c960d7655a600834705152f5a4eaf..a0e5ce9c9b9ea6981948ed102be4f8a870e6a99c 100644 (file)
@@ -71,9 +71,11 @@ struct clk_pll_characteristics {
 };
 
 struct clk_programmable_layout {
+       u8 pres_mask;
        u8 pres_shift;
        u8 css_mask;
        u8 have_slck_mck;
+       u8 is_pres_direct;
 };
 
 extern const struct clk_programmable_layout at91rm9200_programmable_layout;
index 1f70cb164b06f310d867d54797dfd0d87be0a1d3..81943fac4537ef3c8e8d0f611a897b2dd6b8171f 100644 (file)
@@ -125,6 +125,14 @@ static const struct {
          .pll = true },
 };
 
+static const struct clk_programmable_layout sama5d2_programmable_layout = {
+       .pres_mask = 0xff,
+       .pres_shift = 4,
+       .css_mask = 0x7,
+       .have_slck_mck = 0,
+       .is_pres_direct = 1,
+};
+
 static void __init sama5d2_pmc_setup(struct device_node *np)
 {
        struct clk_range range = CLK_RANGE(0, 0);
@@ -249,7 +257,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
 
                hw = at91_clk_register_programmable(regmap, name,
                                                    parent_names, 6, i,
-                                                   &at91sam9x5_programmable_layout);
+                                                   &sama5d2_programmable_layout);
                if (IS_ERR(hw))
                        goto err_free;
        }
index 1acfa3e3cfb401667fbb19666f1aac2add081826..113d71042199b3d3599da84df07e932b1c7902a9 100644 (file)
@@ -362,7 +362,7 @@ struct clk *imx_clk_pll14xx(const char *name, const char *parent_name,
 
        switch (pll_clk->type) {
        case PLL_1416X:
-               if (!pll->rate_table)
+               if (!pll_clk->rate_table)
                        init.ops = &clk_pll1416x_min_ops;
                else
                        init.ops = &clk_pll1416x_ops;
index 9628d4e7690bbdc632f2930bd6feb9653347595b..85daf826619ab4fe707b03211243279a06464625 100644 (file)
@@ -169,11 +169,10 @@ struct clk *mtk_clk_register_gate(
                return ERR_PTR(-ENOMEM);
 
        init.name = name;
-       init.flags = CLK_SET_RATE_PARENT;
+       init.flags = flags | CLK_SET_RATE_PARENT;
        init.parent_names = parent_name ? &parent_name : NULL;
        init.num_parents = parent_name ? 1 : 0;
        init.ops = ops;
-       init.flags = flags;
 
        cg->regmap = regmap;
        cg->set_ofs = set_ofs;
index 41e16dd7272a5943c842eda7064bd6c0394c3664..7a14ac9b2fecfece592807d72c38043bb84c12e4 100644 (file)
@@ -120,7 +120,7 @@ static bool meson_clk_pll_is_better(unsigned long rate,
                        return true;
        } else {
                /* Round down */
-               if (now < rate && best < now)
+               if (now <= rate && best < now)
                        return true;
        }
 
index 0e1ce8c03259b73221266de7aa6491be331815f3..f7b11e1eeebe894c26425067fb5cf14cab09a029 100644 (file)
@@ -960,14 +960,14 @@ static struct clk_regmap g12a_sd_emmc_c_clk0 = {
 /* VPU Clock */
 
 static const char * const g12a_vpu_parent_names[] = {
-       "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7",
+       "fclk_div3", "fclk_div4", "fclk_div5", "fclk_div7",
        "mpll1", "vid_pll", "hifi_pll", "gp0_pll",
 };
 
 static struct clk_regmap g12a_vpu_0_sel = {
        .data = &(struct clk_regmap_mux_data){
                .offset = HHI_VPU_CLK_CNTL,
-               .mask = 0x3,
+               .mask = 0x7,
                .shift = 9,
        },
        .hw.init = &(struct clk_init_data){
@@ -1011,7 +1011,7 @@ static struct clk_regmap g12a_vpu_0 = {
 static struct clk_regmap g12a_vpu_1_sel = {
        .data = &(struct clk_regmap_mux_data){
                .offset = HHI_VPU_CLK_CNTL,
-               .mask = 0x3,
+               .mask = 0x7,
                .shift = 25,
        },
        .hw.init = &(struct clk_init_data){
index 04df2e208ed6ec7a493f62debdbf9051e0528038..29ffb4fde7145adefe855c11283825e1e4ea1f11 100644 (file)
@@ -2216,6 +2216,7 @@ static struct clk_regmap gxbb_vdec_1_div = {
                .offset = HHI_VDEC_CLK_CNTL,
                .shift = 0,
                .width = 7,
+               .flags = CLK_DIVIDER_ROUND_CLOSEST,
        },
        .hw.init = &(struct clk_init_data){
                .name = "vdec_1_div",
@@ -2261,6 +2262,7 @@ static struct clk_regmap gxbb_vdec_hevc_div = {
                .offset = HHI_VDEC2_CLK_CNTL,
                .shift = 16,
                .width = 7,
+               .flags = CLK_DIVIDER_ROUND_CLOSEST,
        },
        .hw.init = &(struct clk_init_data){
                .name = "vdec_hevc_div",
index 08bcc01c0923863790d32dafbc1274bdf2358a28..daff235bc763348a03bebdcd9695cc8e08856744 100644 (file)
@@ -82,8 +82,8 @@ static unsigned long meson_vid_pll_div_recalc_rate(struct clk_hw *hw,
        div = _get_table_val(meson_parm_read(clk->map, &pll_div->val),
                             meson_parm_read(clk->map, &pll_div->sel));
        if (!div || !div->divider) {
-               pr_info("%s: Invalid config value for vid_pll_div\n", __func__);
-               return parent_rate;
+               pr_debug("%s: Invalid config value for vid_pll_div\n", __func__);
+               return 0;
        }
 
        return DIV_ROUND_UP_ULL(parent_rate * div->multiplier, div->divider);
index d977193842dfed1fead553dd2240013c5a0d380a..19174835693b91cd473b59c7fed787f128f268d1 100644 (file)
@@ -165,7 +165,7 @@ static const struct clk_ops plt_clk_ops = {
 };
 
 static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id,
-                                       void __iomem *base,
+                                       const struct pmc_clk_data *pmc_data,
                                        const char **parent_names,
                                        int num_parents)
 {
@@ -184,9 +184,17 @@ static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id,
        init.num_parents = num_parents;
 
        pclk->hw.init = &init;
-       pclk->reg = base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE;
+       pclk->reg = pmc_data->base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE;
        spin_lock_init(&pclk->lock);
 
+       /*
+        * On some systems, pmc_plt_clocks that the firmware has already
+        * enabled are marked as critical so that the clock framework
+        * does not gate them.
+        */
+       if (pmc_data->critical && plt_clk_is_enabled(&pclk->hw))
+               init.flags |= CLK_IS_CRITICAL;
+
        ret = devm_clk_hw_register(&pdev->dev, &pclk->hw);
        if (ret) {
                pclk = ERR_PTR(ret);
@@ -332,7 +340,7 @@ static int plt_clk_probe(struct platform_device *pdev)
                return PTR_ERR(parent_names);
 
        for (i = 0; i < PMC_CLK_NUM; i++) {
-               data->clks[i] = plt_clk_register(pdev, i, pmc_data->base,
+               data->clks[i] = plt_clk_register(pdev, i, pmc_data,
                                                 parent_names, data->nparents);
                if (IS_ERR(data->clks[i])) {
                        err = PTR_ERR(data->clks[i]);
index e22f0dbaebb1d97e407d0be2b67da9b25f8af8dc..2986119dd31fb8391e3256a88942272870acad44 100644 (file)
@@ -385,7 +385,10 @@ static int intel_pstate_get_cppc_guranteed(int cpu)
        if (ret)
                return ret;
 
-       return cppc_perf.guaranteed_perf;
+       if (cppc_perf.guaranteed_perf)
+               return cppc_perf.guaranteed_perf;
+
+       return cppc_perf.nominal_perf;
 }
 
 #else /* CONFIG_ACPI_CPPC_LIB */
@@ -2593,6 +2596,9 @@ static int __init intel_pstate_init(void)
        const struct x86_cpu_id *id;
        int rc;
 
+       if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+               return -ENODEV;
+
        if (no_load)
                return -ENODEV;
 
@@ -2608,7 +2614,7 @@ static int __init intel_pstate_init(void)
        } else {
                id = x86_match_cpu(intel_pstate_cpu_ids);
                if (!id) {
-                       pr_info("CPU ID not supported\n");
+                       pr_info("CPU model not supported\n");
                        return -ENODEV;
                }
 
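Two hardenings above: intel_pstate_init() now bails out on non-Intel CPUs before doing anything else, and the CPPC query treats a zero guaranteed_perf as "not reported by firmware" rather than as a real performance level. The fallback, condensed into one line as a sketch (the hunk spells it out with an explicit if):

        /* zero means "not reported by firmware", not "zero performance" */
        return cppc_perf.guaranteed_perf ?: cppc_perf.nominal_perf;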
index 3f49427766b8810cb361ce2d8a1b8331541da5c0..2b51e0718c9f6e493b8659a2ad3dd24de8eaab0d 100644 (file)
@@ -189,8 +189,8 @@ static int scpi_cpufreq_exit(struct cpufreq_policy *policy)
 
        clk_put(priv->clk);
        dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
-       kfree(priv);
        dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
+       kfree(priv);
 
        return 0;
 }
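The two-line swap above is a use-after-free fix: priv->cpu_dev was dereferenced after kfree(priv). The rule it restores, free an object only after its last use, in sketch form:

        dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
        dev_pm_opp_remove_all_dynamic(priv->cpu_dev);   /* last use of priv */
        kfree(priv);                                    /* now safe to free */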
index b1eadc6652b5f236897811357bd49ac364498f5e..7205d9f4029e11adb7f49d0e57bfdbf11b069fc0 100644 (file)
@@ -865,19 +865,18 @@ static int ahash_update_ctx(struct ahash_request *req)
                if (ret)
                        goto unmap_ctx;
 
-               if (mapped_nents) {
+               if (mapped_nents)
                        sg_to_sec4_sg_last(req->src, mapped_nents,
                                           edesc->sec4_sg + sec4_sg_src_index,
                                           0);
-                       if (*next_buflen)
-                               scatterwalk_map_and_copy(next_buf, req->src,
-                                                        to_hash - *buflen,
-                                                        *next_buflen, 0);
-               } else {
+               else
                        sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
                                            1);
-               }
 
+               if (*next_buflen)
+                       scatterwalk_map_and_copy(next_buf, req->src,
+                                                to_hash - *buflen,
+                                                *next_buflen, 0);
                desc = edesc->hw_desc;
 
                edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
index 4e0eede599a8dedd8c306a65c12366a5c857a4ae..ac0301b695937c1168cac2055a5af41d47536379 100644 (file)
@@ -1578,11 +1578,9 @@ static int stm32_mdma_probe(struct platform_device *pdev)
 
        dmadev->nr_channels = nr_channels;
        dmadev->nr_requests = nr_requests;
-       ret = device_property_read_u32_array(&pdev->dev, "st,ahb-addr-masks",
+       device_property_read_u32_array(&pdev->dev, "st,ahb-addr-masks",
                                       dmadev->ahb_addr_masks,
                                       count);
-       if (ret)
-               return ret;
        dmadev->nr_ahb_addr_masks = count;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
index 91b90c0cea731778bd524a13d801433e7df0a2ab..12acdac858208979438491e90c3782b064f9e952 100644 (file)
@@ -132,8 +132,10 @@ static int adnp_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
        if (err < 0)
                goto out;
 
-       if (err & BIT(pos))
-               err = -EACCES;
+       if (value & BIT(pos)) {
+               err = -EPERM;
+               goto out;
+       }
 
        err = 0;
 
index 854bce4fb9e7209b2697e9c662e65e4456c969c3..217507002dbc38ce7a34c64df751ac5887d304d6 100644 (file)
@@ -1224,6 +1224,8 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
 
        gpio->offset_timer =
                devm_kzalloc(&pdev->dev, gpio->chip.ngpio, GFP_KERNEL);
+       if (!gpio->offset_timer)
+               return -ENOMEM;
 
        return aspeed_gpio_setup_irqs(gpio, pdev);
 }
index 0ecd2369c2cad0daa5e08696ab85b91af5235a26..a09d2f9ebacc8d4909d79119333e344453ea6e0a 100644 (file)
@@ -148,6 +148,8 @@ static int gpio_exar_probe(struct platform_device *pdev)
        mutex_init(&exar_gpio->lock);
 
        index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL);
+       if (index < 0)
+               goto err_destroy;
 
        sprintf(exar_gpio->name, "exar_gpio%d", index);
        exar_gpio->gpio_chip.label = exar_gpio->name;
index 154d959e899323dcea54b2829c6a70a4127bf579..b6a4efce7c9285f0a26411246d615c90f498d0be 100644 (file)
@@ -204,8 +204,8 @@ static ssize_t gpio_mockup_debugfs_read(struct file *file,
        struct gpio_mockup_chip *chip;
        struct seq_file *sfile;
        struct gpio_chip *gc;
+       int val, cnt;
        char buf[3];
-       int val, rv;
 
        if (*ppos != 0)
                return 0;
@@ -216,13 +216,9 @@ static ssize_t gpio_mockup_debugfs_read(struct file *file,
        gc = &chip->gc;
 
        val = gpio_mockup_get(gc, priv->offset);
-       snprintf(buf, sizeof(buf), "%d\n", val);
+       cnt = snprintf(buf, sizeof(buf), "%d\n", val);
 
-       rv = copy_to_user(usr_buf, buf, sizeof(buf));
-       if (rv)
-               return rv;
-
-       return sizeof(buf) - 1;
+       return simple_read_from_buffer(usr_buf, size, ppos, buf, cnt);
 }
 
 static ssize_t gpio_mockup_debugfs_write(struct file *file,
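The read handler rewrite replaces a bare copy_to_user(), whose non-zero return is a count of uncopied bytes and was wrongly passed back as a return value, with simple_read_from_buffer(), which handles short reads, *ppos advancement and -EFAULT for the caller. A minimal debugfs read sketch with hypothetical names:

        static ssize_t foo_read(struct file *file, char __user *usr_buf,
                                size_t size, loff_t *ppos)
        {
                char buf[4];
                int cnt;

                cnt = snprintf(buf, sizeof(buf), "%d\n", foo_get_value());

                /* copies at most size bytes from *ppos, then advances it */
                return simple_read_from_buffer(usr_buf, size, ppos, buf, cnt);
        }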
index 8b9c3ab70f6eade458a501184ce764454819043d..6a3ec575a404ed9fa3dfcf8c78e56381789a8c02 100644 (file)
@@ -120,7 +120,8 @@ static void of_gpio_flags_quirks(struct device_node *np,
         * to determine if the flags should have inverted semantics.
         */
        if (IS_ENABLED(CONFIG_SPI_MASTER) &&
-           of_property_read_bool(np, "cs-gpios")) {
+           of_property_read_bool(np, "cs-gpios") &&
+           !strcmp(propname, "cs-gpios")) {
                struct device_node *child;
                u32 cs;
                int ret;
@@ -142,16 +143,16 @@ static void of_gpio_flags_quirks(struct device_node *np,
                                 * conflict and the "spi-cs-high" flag will
                                 * take precedence.
                                 */
-                               if (of_property_read_bool(np, "spi-cs-high")) {
+                               if (of_property_read_bool(child, "spi-cs-high")) {
                                        if (*flags & OF_GPIO_ACTIVE_LOW) {
                                                pr_warn("%s GPIO handle specifies active low - ignored\n",
-                                                       of_node_full_name(np));
+                                                       of_node_full_name(child));
                                                *flags &= ~OF_GPIO_ACTIVE_LOW;
                                        }
                                } else {
                                        if (!(*flags & OF_GPIO_ACTIVE_LOW))
                                                pr_info("%s enforce active low on chipselect handle\n",
-                                                       of_node_full_name(np));
+                                                       of_node_full_name(child));
                                        *flags |= OF_GPIO_ACTIVE_LOW;
                                }
                                break;
@@ -717,7 +718,13 @@ int of_gpiochip_add(struct gpio_chip *chip)
 
        of_node_get(chip->of_node);
 
-       return of_gpiochip_scan_gpios(chip);
+       status = of_gpiochip_scan_gpios(chip);
+       if (status) {
+               of_node_put(chip->of_node);
+               gpiochip_remove_pin_ranges(chip);
+       }
+
+       return status;
 }
 
 void of_gpiochip_remove(struct gpio_chip *chip)
index 144af07335815998c7238b72e01882cf26692ef8..0495bf1d480a4cfe464e8ff330922264d03deff7 100644 (file)
@@ -2776,7 +2776,7 @@ int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
        }
 
        config = pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE, debounce);
-       return gpio_set_config(chip, gpio_chip_hwgpio(desc), config);
+       return chip->set_config(chip, gpio_chip_hwgpio(desc), config);
 }
 EXPORT_SYMBOL_GPL(gpiod_set_debounce);
 
@@ -2813,7 +2813,7 @@ int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
        packed = pinconf_to_config_packed(PIN_CONFIG_PERSIST_STATE,
                                          !transitory);
        gpio = gpio_chip_hwgpio(desc);
-       rc = gpio_set_config(chip, gpio, packed);
+       rc = chip->set_config(chip, gpio, packed);
        if (rc == -ENOTSUPP) {
                dev_dbg(&desc->gdev->dev, "Persistence not supported for GPIO %d\n",
                                gpio);
index 4f8fb4ecde3419fe8449ddfcea859f17242e6919..5d8b30fd4534582bbf0203343df180927f5bdbc1 100644 (file)
@@ -3173,11 +3173,16 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
                        break;
 
                if (fence) {
-                       r = dma_fence_wait_timeout(fence, false, tmo);
+                       tmo = dma_fence_wait_timeout(fence, false, tmo);
                        dma_fence_put(fence);
                        fence = next;
-                       if (r <= 0)
+                       if (tmo == 0) {
+                               r = -ETIMEDOUT;
                                break;
+                       } else if (tmo < 0) {
+                               r = tmo;
+                               break;
+                       }
                } else {
                        fence = next;
                }
@@ -3188,8 +3193,8 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
                tmo = dma_fence_wait_timeout(fence, false, tmo);
        dma_fence_put(fence);
 
-       if (r <= 0 || tmo <= 0) {
-               DRM_ERROR("recover vram bo from shadow failed\n");
+       if (r < 0 || tmo <= 0) {
+               DRM_ERROR("recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
                return -EIO;
        }
 
@@ -3625,6 +3630,7 @@ static void amdgpu_device_get_min_pci_speed_width(struct amdgpu_device *adev,
        struct pci_dev *pdev = adev->pdev;
        enum pci_bus_speed cur_speed;
        enum pcie_link_width cur_width;
+       u32 ret = 1;
 
        *speed = PCI_SPEED_UNKNOWN;
        *width = PCIE_LNK_WIDTH_UNKNOWN;
@@ -3632,6 +3638,10 @@ static void amdgpu_device_get_min_pci_speed_width(struct amdgpu_device *adev,
        while (pdev) {
                cur_speed = pcie_get_speed_cap(pdev);
                cur_width = pcie_get_width_cap(pdev);
+               ret = pcie_bandwidth_available(adev->pdev, NULL,
+                                                      NULL, &cur_width);
+               if (!ret)
+                       cur_width = PCIE_LNK_WIDTH_RESRV;
 
                if (cur_speed != PCI_SPEED_UNKNOWN) {
                        if (*speed == PCI_SPEED_UNKNOWN)
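dma_fence_wait_timeout() returns the remaining timeout in jiffies on success, 0 on timeout and a negative errno on error; assigning the result to r and testing r <= 0 both discarded the remaining budget and conflated timeout with error. The corrected decode, sketched:

        tmo = dma_fence_wait_timeout(fence, false, tmo);
        if (tmo == 0)
                return -ETIMEDOUT;      /* the wait expired */
        if (tmo < 0)
                return tmo;             /* interrupted or failed */
        /* tmo holds the unused remainder; carry it into the next wait */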
index 0b8ef2d27d6b2b8e60e0959f0cb8e742e4de3c7f..fe393a46f8811dc452dc3db6d062d0aa850e6b47 100644 (file)
@@ -35,6 +35,7 @@
 #include "amdgpu_trace.h"
 
 #define AMDGPU_IB_TEST_TIMEOUT msecs_to_jiffies(1000)
+#define AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT        msecs_to_jiffies(2000)
 
 /*
  * IB
@@ -344,6 +345,8 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
                 * cost waiting for it coming back under RUNTIME only
                */
                tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT;
+       } else if (adev->gmc.xgmi.hive_id) {
+               tmo_gfx = AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT;
        }
 
        for (i = 0; i < adev->num_rings; ++i) {
index d0309e8c9d12cdafa95d2a23e84018f4bb6b8035..a11db2b1a63f41e16acd4df34a24b2f3e6db9140 100644 (file)
@@ -2405,8 +2405,6 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
        /* disable CG */
        WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
 
-       adev->gfx.rlc.funcs->reset(adev);
-
        gfx_v9_0_init_pg(adev);
 
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
index 8be9677c0c07dae65e3c69dafab241e3b16ff975..cf9a49f49d3a41a99bb96dc75c38f24e72579367 100644 (file)
@@ -320,6 +320,7 @@ static const struct kfd_deviceid supported_devices[] = {
        { 0x9876, &carrizo_device_info },       /* Carrizo */
        { 0x9877, &carrizo_device_info },       /* Carrizo */
        { 0x15DD, &raven_device_info },         /* Raven */
+       { 0x15D8, &raven_device_info },         /* Raven */
 #endif
        { 0x67A0, &hawaii_device_info },        /* Hawaii */
        { 0x67A1, &hawaii_device_info },        /* Hawaii */
index fb27783d7a542d565e1c002d03fc051d055039be..3082b55b1e774fd31b4293c402c41174df28e9a9 100644 (file)
@@ -4533,6 +4533,7 @@ static void handle_cursor_update(struct drm_plane *plane,
        amdgpu_crtc->cursor_width = plane->state->crtc_w;
        amdgpu_crtc->cursor_height = plane->state->crtc_h;
 
+       memset(&attributes, 0, sizeof(attributes));
        attributes.address.high_part = upper_32_bits(address);
        attributes.address.low_part  = lower_32_bits(address);
        attributes.width             = plane->state->crtc_w;
@@ -5429,9 +5430,11 @@ static void get_freesync_config_for_crtc(
        struct amdgpu_dm_connector *aconnector =
                        to_amdgpu_dm_connector(new_con_state->base.connector);
        struct drm_display_mode *mode = &new_crtc_state->base.mode;
+       int vrefresh = drm_mode_vrefresh(mode);
 
        new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
-               aconnector->min_vfreq <= drm_mode_vrefresh(mode);
+                                       vrefresh >= aconnector->min_vfreq &&
+                                       vrefresh <= aconnector->max_vfreq;
 
        if (new_crtc_state->vrr_supported) {
                new_crtc_state->stream->ignore_msa_timing_param = true;
index 4eba3c4800b63bef00ec9fd532919aa84ca72126..ea18e9c2d8cea5c65582274a297d67b1d0fbb82d 100644 (file)
@@ -2660,12 +2660,18 @@ void core_link_enable_stream(
 void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option)
 {
        struct dc  *core_dc = pipe_ctx->stream->ctx->dc;
+       struct dc_stream_state *stream = pipe_ctx->stream;
 
        core_dc->hwss.blank_stream(pipe_ctx);
 
        if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
                deallocate_mst_payload(pipe_ctx);
 
+       if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+               dal_ddc_service_write_scdc_data(
+                       stream->link->ddc, 0,
+                       stream->timing.flags.LTE_340MCSC_SCRAMBLE);
+
        core_dc->hwss.disable_stream(pipe_ctx, option);
 
        disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal);
index 683829466a44c4279db97fbb72338fef9851b404..0ba68d41b9c37b91064a5defbbcf62a3160df53f 100644 (file)
@@ -1150,28 +1150,9 @@ void hubp1_cursor_set_position(
        REG_UPDATE(CURSOR_CONTROL,
                        CURSOR_ENABLE, cur_en);
 
-       //account for cases where we see negative offset relative to overlay plane
-       if (src_x_offset < 0 && src_y_offset < 0) {
-               REG_SET_2(CURSOR_POSITION, 0,
-                       CURSOR_X_POSITION, 0,
-                       CURSOR_Y_POSITION, 0);
-               x_hotspot -= src_x_offset;
-               y_hotspot -= src_y_offset;
-       } else if (src_x_offset < 0) {
-               REG_SET_2(CURSOR_POSITION, 0,
-                       CURSOR_X_POSITION, 0,
-                       CURSOR_Y_POSITION, pos->y);
-               x_hotspot -= src_x_offset;
-       } else if (src_y_offset < 0) {
-               REG_SET_2(CURSOR_POSITION, 0,
+       REG_SET_2(CURSOR_POSITION, 0,
                        CURSOR_X_POSITION, pos->x,
-                       CURSOR_Y_POSITION, 0);
-               y_hotspot -= src_y_offset;
-       } else {
-               REG_SET_2(CURSOR_POSITION, 0,
-                               CURSOR_X_POSITION, pos->x,
-                               CURSOR_Y_POSITION, pos->y);
-       }
+                       CURSOR_Y_POSITION, pos->y);
 
        REG_SET_2(CURSOR_HOT_SPOT, 0,
                        CURSOR_HOT_SPOT_X, x_hotspot,
index 9aa7bec1b5fe6f3aeb67da66d4b88b2e16966bbb..23b5b94a4939ac809c40448f1aa33e5d1500f93e 100644 (file)
@@ -91,6 +91,12 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
         *   MP0CLK DS
         */
        data->registry_data.disallowed_features = 0xE0041C00;
+       /* ECC feature should be disabled on old SMUs */
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
+       hwmgr->smu_version = smum_get_argument(hwmgr);
+       if (hwmgr->smu_version < 0x282100)
+               data->registry_data.disallowed_features |= FEATURE_ECC_MASK;
+
        data->registry_data.od_state_in_dc_support = 0;
        data->registry_data.thermal_support = 1;
        data->registry_data.skip_baco_hardware = 0;
@@ -357,6 +363,7 @@ static void vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr)
        data->smu_features[GNLD_DS_MP1CLK].smu_feature_id = FEATURE_DS_MP1CLK_BIT;
        data->smu_features[GNLD_DS_MP0CLK].smu_feature_id = FEATURE_DS_MP0CLK_BIT;
        data->smu_features[GNLD_XGMI].smu_feature_id = FEATURE_XGMI_BIT;
+       data->smu_features[GNLD_ECC].smu_feature_id = FEATURE_ECC_BIT;
 
        for (i = 0; i < GNLD_FEATURES_MAX; i++) {
                data->smu_features[i].smu_feature_bitmap =
@@ -3020,7 +3027,8 @@ static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
                                "FCLK_DS",
                                "MP1CLK_DS",
                                "MP0CLK_DS",
-                               "XGMI"};
+                               "XGMI",
+                               "ECC"};
        static const char *output_title[] = {
                                "FEATURES",
                                "BITMASK",
@@ -3462,6 +3470,7 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
        struct vega20_single_dpm_table *dpm_table;
        bool vblank_too_short = false;
        bool disable_mclk_switching;
+       bool disable_fclk_switching;
        uint32_t i, latency;
 
        disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
@@ -3537,13 +3546,20 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
        if (hwmgr->display_config->nb_pstate_switch_disable)
                dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
 
+       if ((disable_mclk_switching &&
+           (dpm_table->dpm_state.hard_min_level == dpm_table->dpm_levels[dpm_table->count - 1].value)) ||
+            hwmgr->display_config->min_mem_set_clock / 100 >= dpm_table->dpm_levels[dpm_table->count - 1].value)
+               disable_fclk_switching = true;
+       else
+               disable_fclk_switching = false;
+
        /* fclk */
        dpm_table = &(data->dpm_table.fclk_table);
        dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
        dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
        dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
        dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
-       if (hwmgr->display_config->nb_pstate_switch_disable)
+       if (hwmgr->display_config->nb_pstate_switch_disable || disable_fclk_switching)
                dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
 
        /* vclk */
index a5bc758ae09728327bd1230dbbf9969460eba263..ac2a3118a0ae779224be91fd750dda2319027a64 100644 (file)
@@ -80,6 +80,7 @@ enum {
        GNLD_DS_MP1CLK,
        GNLD_DS_MP0CLK,
        GNLD_XGMI,
+       GNLD_ECC,
 
        GNLD_FEATURES_MAX
 };
index 63d5cf69154967b90aa696de2ae5c1d407bd579f..195c4ae67058554d1d2bf194cedf9762f79a96e3 100644 (file)
@@ -99,7 +99,7 @@
 #define FEATURE_DS_MP1CLK_BIT           30
 #define FEATURE_DS_MP0CLK_BIT           31
 #define FEATURE_XGMI_BIT                32
-#define FEATURE_SPARE_33_BIT            33
+#define FEATURE_ECC_BIT                 33
 #define FEATURE_SPARE_34_BIT            34
 #define FEATURE_SPARE_35_BIT            35
 #define FEATURE_SPARE_36_BIT            36
 #define FEATURE_DS_FCLK_MASK            (1 << FEATURE_DS_FCLK_BIT            )
 #define FEATURE_DS_MP1CLK_MASK          (1 << FEATURE_DS_MP1CLK_BIT          )
 #define FEATURE_DS_MP0CLK_MASK          (1 << FEATURE_DS_MP0CLK_BIT          )
-#define FEATURE_XGMI_MASK               (1 << FEATURE_XGMI_BIT               )
+#define FEATURE_XGMI_MASK               (1ULL << FEATURE_XGMI_BIT               )
+#define FEATURE_ECC_MASK                (1ULL << FEATURE_ECC_BIT                )
 
 #define DPM_OVERRIDE_DISABLE_SOCCLK_PID             0x00000001
 #define DPM_OVERRIDE_DISABLE_UCLK_PID               0x00000002
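Bit 33 is the reason for the 1ULL change: the constant 1 is a 32-bit int, and shifting it by 32 or more is undefined behaviour in C, so any feature mask above bit 31 must be built from a 64-bit constant:

        #define FEATURE_XGMI_BIT  32
        #define FEATURE_ECC_BIT   33

        /* (1 << 33) shifts a 32-bit int past its width: undefined behaviour */
        #define FEATURE_XGMI_MASK (1ULL << FEATURE_XGMI_BIT)
        #define FEATURE_ECC_MASK  (1ULL << FEATURE_ECC_BIT)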
index a63e5f0dae56ad3de5a372ecd167a350d5b89457..db761329a1e3ef19d2fa05f86fdaf5b3b06c6b53 100644 (file)
@@ -1037,6 +1037,31 @@ void dw_hdmi_phy_i2c_write(struct dw_hdmi *hdmi, unsigned short data,
 }
 EXPORT_SYMBOL_GPL(dw_hdmi_phy_i2c_write);
 
+/* Filter out invalid setups to avoid configuring SCDC and scrambling */
+static bool dw_hdmi_support_scdc(struct dw_hdmi *hdmi)
+{
+       struct drm_display_info *display = &hdmi->connector.display_info;
+
+       /* Completely disable SCDC support for older controllers */
+       if (hdmi->version < 0x200a)
+               return false;
+
+       /* Disable if SCDC is not supported, or if an HF-VSDB block is absent */
+       if (!display->hdmi.scdc.supported ||
+           !display->hdmi.scdc.scrambling.supported)
+               return false;
+
+       /*
+        * Disable if the display only supports low TMDS rates and
+        * scrambling for low rates is not supported either
+        */
+       if (!display->hdmi.scdc.scrambling.low_rates &&
+           display->max_tmds_clock <= 340000)
+               return false;
+
+       return true;
+}
+
 /*
  * HDMI2.0 Specifies the following procedure for High TMDS Bit Rates:
  * - The Source shall suspend transmission of the TMDS clock and data
@@ -1055,7 +1080,7 @@ void dw_hdmi_set_high_tmds_clock_ratio(struct dw_hdmi *hdmi)
        unsigned long mtmdsclock = hdmi->hdmi_data.video_mode.mtmdsclock;
 
        /* Control for TMDS Bit Period/TMDS Clock-Period Ratio */
-       if (hdmi->connector.display_info.hdmi.scdc.supported) {
+       if (dw_hdmi_support_scdc(hdmi)) {
                if (mtmdsclock > HDMI14_MAX_TMDSCLK)
                        drm_scdc_set_high_tmds_clock_ratio(hdmi->ddc, 1);
                else
@@ -1579,8 +1604,9 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
 
        /* Set up HDMI_FC_INVIDCONF */
        inv_val = (hdmi->hdmi_data.hdcp_enable ||
-                  vmode->mtmdsclock > HDMI14_MAX_TMDSCLK ||
-                  hdmi_info->scdc.scrambling.low_rates ?
+                  (dw_hdmi_support_scdc(hdmi) &&
+                   (vmode->mtmdsclock > HDMI14_MAX_TMDSCLK ||
+                    hdmi_info->scdc.scrambling.low_rates)) ?
                HDMI_FC_INVIDCONF_HDCP_KEEPOUT_ACTIVE :
                HDMI_FC_INVIDCONF_HDCP_KEEPOUT_INACTIVE);
 
@@ -1646,7 +1672,7 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
        }
 
        /* Scrambling Control */
-       if (hdmi_info->scdc.supported) {
+       if (dw_hdmi_support_scdc(hdmi)) {
                if (vmode->mtmdsclock > HDMI14_MAX_TMDSCLK ||
                    hdmi_info->scdc.scrambling.low_rates) {
                        /*
index 40ac1984803459b7a0e8f67e09f81b61820035ef..fbb76332cc9f149c0cc037a6d35a5ed9c63d1baa 100644 (file)
@@ -1034,7 +1034,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
                        funcs->atomic_disable(crtc, old_crtc_state);
                else if (funcs->disable)
                        funcs->disable(crtc);
-               else
+               else if (funcs->dpms)
                        funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
 
                if (!(dev->irq_enabled && dev->num_crtcs))
@@ -1277,10 +1277,9 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
                if (new_crtc_state->enable) {
                        DRM_DEBUG_ATOMIC("enabling [CRTC:%d:%s]\n",
                                         crtc->base.id, crtc->name);
-
                        if (funcs->atomic_enable)
                                funcs->atomic_enable(crtc, old_crtc_state);
-                       else
+                       else if (funcs->commit)
                                funcs->commit(crtc);
                }
        }
index 381581b01d485e581df8bcebcd6983b01bc8a488..05bbc2b622fc1094a2a8f85ce060d0805eae0f7e 100644 (file)
@@ -376,11 +376,7 @@ void drm_dev_unplug(struct drm_device *dev)
        synchronize_srcu(&drm_unplug_srcu);
 
        drm_dev_unregister(dev);
-
-       mutex_lock(&drm_global_mutex);
-       if (dev->open_count == 0)
-               drm_dev_put(dev);
-       mutex_unlock(&drm_global_mutex);
+       drm_dev_put(dev);
 }
 EXPORT_SYMBOL(drm_dev_unplug);
 
index 0e9349ff2d16a64dd6628ab47de8f9ab0271d632..af2ab640cadbb05105325a0de2b31ae5f5c70ccf 100644 (file)
@@ -1963,7 +1963,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
                                best_depth = fmt->depth;
                }
        }
-       if (sizes.surface_depth != best_depth) {
+       if (sizes.surface_depth != best_depth && best_depth) {
                DRM_INFO("requested bpp %d, scaled depth down to %d",
                         sizes.surface_bpp, best_depth);
                sizes.surface_depth = best_depth;
index 83a5bbca6e7e089f10d75ea723ac982b7df61356..7caa3c7ed9789901e4aa5df2c2204326cfe39c27 100644 (file)
@@ -489,11 +489,9 @@ int drm_release(struct inode *inode, struct file *filp)
 
        drm_close_helper(filp);
 
-       if (!--dev->open_count) {
+       if (!--dev->open_count)
                drm_lastclose(dev);
-               if (drm_dev_is_unplugged(dev))
-                       drm_put_dev(dev);
-       }
+
        mutex_unlock(&drm_global_mutex);
 
        drm_minor_release(minor);
index 35b4ec3f7618b887e5661d0d652cca99b6ed02c6..3592d04c33b283cac0abd2f432ce313194d2b606 100644 (file)
@@ -1441,7 +1441,7 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
        }
 
        if (index_mode) {
-               if (guest_gma >= I915_GTT_PAGE_SIZE / sizeof(u64)) {
+               if (guest_gma >= I915_GTT_PAGE_SIZE) {
                        ret = -EFAULT;
                        goto err;
                }
index 035479e273beca866575c4bef70438583029d2df..e3f9caa7839f7347e1eaa25a798c6574446e813f 100644 (file)
@@ -448,7 +448,7 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt)
 /**
  * intel_vgpu_emulate_hotplug - trigger hotplug event for vGPU
  * @vgpu: a vGPU
- * @conncted: link state
+ * @connected: link state
  *
  * This function is used to trigger hotplug interrupt for vGPU
  *
index 3e7e2b80c8579017cecdda478bc6166e1f46e061..69a9a1b2ea4ac44ba7d8f6530f99f59a9958076f 100644 (file)
@@ -209,7 +209,7 @@ static int vgpu_get_plane_info(struct drm_device *dev,
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_vgpu_primary_plane_format p;
        struct intel_vgpu_cursor_plane_format c;
-       int ret;
+       int ret, tile_height = 1;
 
        if (plane_id == DRM_PLANE_TYPE_PRIMARY) {
                ret = intel_vgpu_decode_primary_plane(vgpu, &p);
@@ -228,19 +228,19 @@ static int vgpu_get_plane_info(struct drm_device *dev,
                        break;
                case PLANE_CTL_TILED_X:
                        info->drm_format_mod = I915_FORMAT_MOD_X_TILED;
+                       tile_height = 8;
                        break;
                case PLANE_CTL_TILED_Y:
                        info->drm_format_mod = I915_FORMAT_MOD_Y_TILED;
+                       tile_height = 32;
                        break;
                case PLANE_CTL_TILED_YF:
                        info->drm_format_mod = I915_FORMAT_MOD_Yf_TILED;
+                       tile_height = 32;
                        break;
                default:
                        gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled);
                }
-
-               info->size = (((p.stride * p.height * p.bpp) / 8) +
-                             (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        } else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
                ret = intel_vgpu_decode_cursor_plane(vgpu, &c);
                if (ret)
@@ -262,14 +262,13 @@ static int vgpu_get_plane_info(struct drm_device *dev,
                        info->x_hot = UINT_MAX;
                        info->y_hot = UINT_MAX;
                }
-
-               info->size = (((info->stride * c.height * c.bpp) / 8)
-                               + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        } else {
                gvt_vgpu_err("invalid plane id:%d\n", plane_id);
                return -EINVAL;
        }
 
+       info->size = (info->stride * roundup(info->height, tile_height)
+                     + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (info->size == 0) {
                gvt_vgpu_err("fb size is zero\n");
                return -EINVAL;
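Plane size is now derived from the stride and the height rounded up to a whole tile row, since tiled surfaces are laid out in full tile rows (X-tile: 8 rows, Y/Yf-tile: 32 rows per the hunk), rather than from width * height * bpp. A worked example:

        /* stride = 16384 bytes, height = 1080, Y-tiled => tile_height = 32 */
        info->size = (info->stride * roundup(info->height, tile_height)
                      + PAGE_SIZE - 1) >> PAGE_SHIFT;
        /* roundup(1080, 32) = 1088 rows; 16384 * 1088 bytes = 4352 pages */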
index c7103dd2d8d571fde462f173dcc67efc0973cc69..9814773882ec2b875ae2db00a22768deed72c618 100644 (file)
@@ -750,14 +750,20 @@ static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
 
 static void ppgtt_free_all_spt(struct intel_vgpu *vgpu)
 {
-       struct intel_vgpu_ppgtt_spt *spt;
+       struct intel_vgpu_ppgtt_spt *spt, *spn;
        struct radix_tree_iter iter;
-       void **slot;
+       LIST_HEAD(all_spt);
+       void __rcu **slot;
 
+       rcu_read_lock();
        radix_tree_for_each_slot(slot, &vgpu->gtt.spt_tree, &iter, 0) {
                spt = radix_tree_deref_slot(slot);
-               ppgtt_free_spt(spt);
+               list_move(&spt->post_shadow_list, &all_spt);
        }
+       rcu_read_unlock();
+
+       list_for_each_entry_safe(spt, spn, &all_spt, post_shadow_list)
+               ppgtt_free_spt(spt);
 }
 
 static int ppgtt_handle_guest_write_page_table_bytes(
@@ -1882,7 +1888,11 @@ struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
        }
 
        list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head);
+
+       mutex_lock(&gvt->gtt.ppgtt_mm_lock);
        list_add_tail(&mm->ppgtt_mm.lru_list, &gvt->gtt.ppgtt_mm_lru_list_head);
+       mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
+
        return mm;
 }
 
@@ -1942,7 +1952,7 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
  */
 void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
 {
-       atomic_dec(&mm->pincount);
+       atomic_dec_if_positive(&mm->pincount);
 }
 
 /**
@@ -1967,9 +1977,10 @@ int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
                if (ret)
                        return ret;
 
+               mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
                list_move_tail(&mm->ppgtt_mm.lru_list,
                               &mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head);
-
+               mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
        }
 
        return 0;
@@ -1980,6 +1991,8 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
        struct intel_vgpu_mm *mm;
        struct list_head *pos, *n;
 
+       mutex_lock(&gvt->gtt.ppgtt_mm_lock);
+
        list_for_each_safe(pos, n, &gvt->gtt.ppgtt_mm_lru_list_head) {
                mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.lru_list);
 
@@ -1987,9 +2000,11 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
                        continue;
 
                list_del_init(&mm->ppgtt_mm.lru_list);
+               mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
                invalidate_ppgtt_mm(mm);
                return 1;
        }
+       mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
        return 0;
 }
 
@@ -2659,6 +2674,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
                }
        }
        INIT_LIST_HEAD(&gvt->gtt.ppgtt_mm_lru_list_head);
+       mutex_init(&gvt->gtt.ppgtt_mm_lock);
        return 0;
 }
 
@@ -2699,7 +2715,9 @@ void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
        list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
                mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
                if (mm->type == INTEL_GVT_MM_PPGTT) {
+                       mutex_lock(&vgpu->gvt->gtt.ppgtt_mm_lock);
                        list_del_init(&mm->ppgtt_mm.lru_list);
+                       mutex_unlock(&vgpu->gvt->gtt.ppgtt_mm_lock);
                        if (mm->ppgtt_mm.shadowed)
                                invalidate_ppgtt_mm(mm);
                }
index d8cb04cc946dff3e19466ff387089db96c226d53..edb610dc5d8689e49f22310b310133b9cb3ee921 100644 (file)
@@ -88,6 +88,7 @@ struct intel_gvt_gtt {
        void (*mm_free_page_table)(struct intel_vgpu_mm *mm);
        struct list_head oos_page_use_list_head;
        struct list_head oos_page_free_list_head;
+       struct mutex ppgtt_mm_lock;
        struct list_head ppgtt_mm_lru_list_head;
 
        struct page *scratch_page;
index d5fcc447d22f0d0663a4664c5767d72fa199cb0d..a68addf95c230f2edcc9b5b21860e9aee406bc27 100644 (file)
@@ -905,7 +905,7 @@ static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, u64 off)
 static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off,
                void *buf, unsigned long count, bool is_write)
 {
-       void *aperture_va;
+       void __iomem *aperture_va;
 
        if (!intel_vgpu_in_aperture(vgpu, off) ||
            !intel_vgpu_in_aperture(vgpu, off + count)) {
@@ -920,9 +920,9 @@ static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off,
                return -EIO;
 
        if (is_write)
-               memcpy(aperture_va + offset_in_page(off), buf, count);
+               memcpy_toio(aperture_va + offset_in_page(off), buf, count);
        else
-               memcpy(buf, aperture_va + offset_in_page(off), count);
+               memcpy_fromio(buf, aperture_va + offset_in_page(off), count);
 
        io_mapping_unmap(aperture_va);
 
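The aperture is device (MMIO) memory, so the mapping is now typed void __iomem * and copied with the io accessors, which use access primitives that are legal for I/O memory on every architecture, instead of plain memcpy(). The shape of the pattern, with the mapping object name here a placeholder:

        void __iomem *va = io_mapping_map_wc(&gm_mapping, off, PAGE_SIZE);

        if (is_write)
                memcpy_toio(va + offset_in_page(off), buf, count);
        else
                memcpy_fromio(buf, va + offset_in_page(off), count);

        io_mapping_unmap(va);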
index 7d84cfb9051ac886579648ac7bb2cc5e2a70b3fa..7902fb162d09441f9b4f65447f5e6619b8792c01 100644 (file)
@@ -132,6 +132,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
 
        {RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
        {RCS, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */
+       {RCS, _MMIO(0x20D8), 0xffff, true}, /* 0x20d8 */
 
        {RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
        {RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
index 1bb8f936fdaa75f2ee738bdf3235a247fac90fe8..05b953793316b28ac1fb19c902474e468ba828b0 100644 (file)
@@ -346,7 +346,7 @@ static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
        int i = 0;
 
        if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed)
-               return -1;
+               return -EINVAL;
 
        if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
                px_dma(&ppgtt->pml4) = mm->ppgtt_mm.shadow_pdps[0];
@@ -410,12 +410,6 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
        if (workload->shadow)
                return 0;
 
-       ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
-       if (ret < 0) {
-               gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
-               return ret;
-       }
-
        /* pin shadow context by gvt even the shadow context will be pinned
         * when i915 alloc request. That is because gvt will update the guest
         * context from shadow context when workload is completed, and at that
@@ -678,6 +672,9 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 {
        struct intel_vgpu *vgpu = workload->vgpu;
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+       struct intel_vgpu_submission *s = &vgpu->submission;
+       struct i915_gem_context *shadow_ctx = s->shadow_ctx;
+       struct i915_request *rq;
        int ring_id = workload->ring_id;
        int ret;
 
@@ -687,6 +684,12 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
        mutex_lock(&vgpu->vgpu_lock);
        mutex_lock(&dev_priv->drm.struct_mutex);
 
+       ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
+       if (ret < 0) {
+               gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
+               goto err_req;
+       }
+
        ret = intel_gvt_workload_req_alloc(workload);
        if (ret)
                goto err_req;
@@ -703,6 +706,14 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 
        ret = prepare_workload(workload);
 out:
+       if (ret) {
+               /* We might still need to add a request with
+                * a clean ctx to retire it properly.
+                */
+               rq = fetch_and_zero(&workload->req);
+               i915_request_put(rq);
+       }
+
        if (!IS_ERR_OR_NULL(workload->req)) {
                gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
                                ring_id, workload->req);
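fetch_and_zero() in the error path above is an i915 utility that reads a pointer and clears it in one expression, so ownership of the request passes to the cleanup code exactly once. A rough sketch of the idiom (the real macro lives in i915_utils.h; demo_drop_req() is hypothetical):

/* Rough equivalent of the i915 fetch_and_zero() idiom. */
#define fetch_and_zero(ptr) ({			\
	typeof(*ptr) __T = *(ptr);		\
	*(ptr) = (typeof(*ptr))0;		\
	__T;					\
})

/* Usage sketch: steal the request so no other path can see it, then
 * drop the reference so it can be retired. */
static void demo_drop_req(struct intel_vgpu_workload *workload)
{
	struct i915_request *rq = fetch_and_zero(&workload->req);

	if (rq)
		i915_request_put(rq);
}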
@@ -739,7 +750,8 @@ static struct intel_vgpu_workload *pick_next_workload(
                goto out;
        }
 
-       if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
+       if (!scheduler->current_vgpu->active ||
+           list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
                goto out;
 
        /*
@@ -1474,8 +1486,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
                intel_runtime_pm_put_unchecked(dev_priv);
        }
 
-       if (ret && (vgpu_is_vm_unhealthy(ret))) {
-               enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
+       if (ret) {
+               if (vgpu_is_vm_unhealthy(ret))
+                       enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
                intel_vgpu_destroy_workload(workload);
                return ERR_PTR(ret);
        }
index 0bd890c04fe4f7c911bd9bde1a79af11af08ff5c..f6f6e5b78e9784c0ffee5f7132a8ddd2a9339954 100644 (file)
@@ -4830,7 +4830,10 @@ static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
                ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
                                       &ctx);
                if (ret) {
-                       ret = -EINTR;
+                       if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
+                               try_again = true;
+                               continue;
+                       }
                        break;
                }
                crtc = connector->state->crtc;
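The debugfs fix above adopts the standard DRM acquire-context retry dance: on -EDEADLK, back off (dropping the held locks) and retry rather than bailing out. A minimal, self-contained sketch of that loop; demo_locked_op() is hypothetical:

#include <drm/drm_modeset_lock.h>

static int demo_locked_op(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx);
	if (ret == -EDEADLK) {
		/* Drop everything we hold and restart the sequence. */
		ret = drm_modeset_backoff(&ctx);
		if (!ret)
			goto retry;
	}

	if (!ret) {
		/* ... inspect state under the lock ... */
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	return ret;
}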
index 9adc7bb9e69ccfec96e468f95435b83e084ffcce..a67a63b5aa84a09d675793dc118fce8829315917 100644 (file)
@@ -2346,7 +2346,8 @@ static inline unsigned int i915_sg_segment_size(void)
                                 INTEL_DEVID(dev_priv) == 0x5915 || \
                                 INTEL_DEVID(dev_priv) == 0x591E)
 #define IS_AML_ULX(dev_priv)   (INTEL_DEVID(dev_priv) == 0x591C || \
-                                INTEL_DEVID(dev_priv) == 0x87C0)
+                                INTEL_DEVID(dev_priv) == 0x87C0 || \
+                                INTEL_DEVID(dev_priv) == 0x87CA)
 #define IS_SKL_GT2(dev_priv)   (IS_SKYLAKE(dev_priv) && \
                                 INTEL_INFO(dev_priv)->gt == 2)
 #define IS_SKL_GT3(dev_priv)   (IS_SKYLAKE(dev_priv) && \
index 638a586469f97be9fb83bbbcb152c518e7d46e1e..047855dd8c6b828ce42f926680f7d8466883d3cc 100644 (file)
@@ -2863,7 +2863,7 @@ enum i915_power_well_id {
 #define GEN11_GT_VEBOX_VDBOX_DISABLE   _MMIO(0x9140)
 #define   GEN11_GT_VDBOX_DISABLE_MASK  0xff
 #define   GEN11_GT_VEBOX_DISABLE_SHIFT 16
-#define   GEN11_GT_VEBOX_DISABLE_MASK  (0xff << GEN11_GT_VEBOX_DISABLE_SHIFT)
+#define   GEN11_GT_VEBOX_DISABLE_MASK  (0x0f << GEN11_GT_VEBOX_DISABLE_SHIFT)
 
 #define GEN11_EU_DISABLE _MMIO(0x9134)
 #define GEN11_EU_DIS_MASK 0xFF
@@ -9243,7 +9243,7 @@ enum skl_power_gate {
 #define TRANS_DDI_FUNC_CTL2(tran)      _MMIO_TRANS2(tran, \
                                                     _TRANS_DDI_FUNC_CTL2_A)
 #define  PORT_SYNC_MODE_ENABLE                 (1 << 4)
-#define  PORT_SYNC_MODE_MASTER_SELECT(x)       ((x) < 0)
+#define  PORT_SYNC_MODE_MASTER_SELECT(x)       ((x) << 0)
 #define  PORT_SYNC_MODE_MASTER_SELECT_MASK     (0x7 << 0)
 #define  PORT_SYNC_MODE_MASTER_SELECT_SHIFT    0
 
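Both register-macro fixes above are one-character bugs: the VEBOX disable mask was 0xff for a 4-bit field, and PORT_SYNC_MODE_MASTER_SELECT(x) used `<` (a comparison yielding 0 or 1) where `<<` was intended. Deriving the mask from a single width/shift pair makes such macros harder to get wrong; a hypothetical sketch:

/* Hypothetical field helpers: the mask is derived from the same
 * shift/width pair the setter uses, so they cannot drift apart. */
#define DEMO_FIELD_SHIFT	16
#define DEMO_FIELD_WIDTH	4
#define DEMO_FIELD_MASK		(((1u << DEMO_FIELD_WIDTH) - 1) << DEMO_FIELD_SHIFT)
#define DEMO_FIELD(x)		(((x) << DEMO_FIELD_SHIFT) & DEMO_FIELD_MASK)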
index 73a7bee24a663faa672ade21cfe7ea7cb1bc4b46..641e0778fa9c4123204f75091df3c53b5162a961 100644 (file)
@@ -323,6 +323,21 @@ static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder)
        }
 }
 
+static void get_dsi_io_power_domains(struct drm_i915_private *dev_priv,
+                                    struct intel_dsi *intel_dsi)
+{
+       enum port port;
+
+       for_each_dsi_port(port, intel_dsi->ports) {
+               WARN_ON(intel_dsi->io_wakeref[port]);
+               intel_dsi->io_wakeref[port] =
+                       intel_display_power_get(dev_priv,
+                                               port == PORT_A ?
+                                               POWER_DOMAIN_PORT_DDI_A_IO :
+                                               POWER_DOMAIN_PORT_DDI_B_IO);
+       }
+}
+
 static void gen11_dsi_enable_io_power(struct intel_encoder *encoder)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -336,13 +351,7 @@ static void gen11_dsi_enable_io_power(struct intel_encoder *encoder)
                I915_WRITE(ICL_DSI_IO_MODECTL(port), tmp);
        }
 
-       for_each_dsi_port(port, intel_dsi->ports) {
-               intel_dsi->io_wakeref[port] =
-                       intel_display_power_get(dev_priv,
-                                               port == PORT_A ?
-                                               POWER_DOMAIN_PORT_DDI_A_IO :
-                                               POWER_DOMAIN_PORT_DDI_B_IO);
-       }
+       get_dsi_io_power_domains(dev_priv, intel_dsi);
 }
 
 static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder)
@@ -589,6 +598,12 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder,
                val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port);
        }
        I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+
+       for_each_dsi_port(port, intel_dsi->ports) {
+               val &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port);
+       }
+       I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+
        POSTING_READ(DPCLKA_CFGCR0_ICL);
 
        mutex_unlock(&dev_priv->dpll_lock);
@@ -1117,7 +1132,7 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder)
                        DRM_ERROR("DDI port:%c buffer not idle\n",
                                  port_name(port));
        }
-       gen11_dsi_ungate_clocks(encoder);
+       gen11_dsi_gate_clocks(encoder);
 }
 
 static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
@@ -1218,20 +1233,11 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
        return 0;
 }
 
-static u64 gen11_dsi_get_power_domains(struct intel_encoder *encoder,
-                                      struct intel_crtc_state *crtc_state)
+static void gen11_dsi_get_power_domains(struct intel_encoder *encoder,
+                                       struct intel_crtc_state *crtc_state)
 {
-       struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
-       u64 domains = 0;
-       enum port port;
-
-       for_each_dsi_port(port, intel_dsi->ports)
-               if (port == PORT_A)
-                       domains |= BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO);
-               else
-                       domains |= BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO);
-
-       return domains;
+       get_dsi_io_power_domains(to_i915(encoder->base.dev),
+                                enc_to_intel_dsi(&encoder->base));
 }
 
 static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
index 14d580cdefd3e875e08b7af0be350d4f877fb7ef..ab4e60dfd6a3460001cbcae4691f1ede8ebb230e 100644 (file)
@@ -2075,12 +2075,11 @@ intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port)
                                              intel_aux_power_domain(dig_port);
 }
 
-static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder,
-                                      struct intel_crtc_state *crtc_state)
+static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
+                                       struct intel_crtc_state *crtc_state)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_digital_port *dig_port;
-       u64 domains;
 
        /*
         * TODO: Add support for MST encoders. Atm, the following should never
@@ -2088,10 +2087,10 @@ static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder,
         * hook.
         */
        if (WARN_ON(intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)))
-               return 0;
+               return;
 
        dig_port = enc_to_dig_port(&encoder->base);
-       domains = BIT_ULL(dig_port->ddi_io_power_domain);
+       intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
 
        /*
         * AUX power is only needed for (e)DP mode, and for HDMI mode on TC
@@ -2099,15 +2098,15 @@ static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder,
         */
        if (intel_crtc_has_dp_encoder(crtc_state) ||
            intel_port_is_tc(dev_priv, encoder->port))
-               domains |= BIT_ULL(intel_ddi_main_link_aux_domain(dig_port));
+               intel_display_power_get(dev_priv,
+                                       intel_ddi_main_link_aux_domain(dig_port));
 
        /*
         * VDSC power is needed when DSC is enabled
         */
        if (crtc_state->dsc_params.compression_enable)
-               domains |= BIT_ULL(intel_dsc_power_domain(crtc_state));
-
-       return domains;
+               intel_display_power_get(dev_priv,
+                                       intel_dsc_power_domain(crtc_state));
 }
 
 void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state)
@@ -2825,10 +2824,10 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
                                return;
                }
                /*
-                * DSI ports should have their DDI clock ungated when disabled
-                * and gated when enabled.
+                * For DSI we keep the DDI clocks gated
+                * except during the enable/disable sequence.
                 */
-               ddi_clk_needed = !encoder->base.crtc;
+               ddi_clk_needed = false;
        }
 
        val = I915_READ(DPCLKA_CFGCR0_ICL);
index ccb616351bba725052ea3a752cc5263744d58af2..421aac80a83815b9c1cfa40a7142e171cfd8bcd5 100644 (file)
@@ -15986,8 +15986,6 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv)
        struct intel_encoder *encoder;
 
        for_each_intel_encoder(&dev_priv->drm, encoder) {
-               u64 get_domains;
-               enum intel_display_power_domain domain;
                struct intel_crtc_state *crtc_state;
 
                if (!encoder->get_power_domains)
@@ -16001,9 +15999,7 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv)
                        continue;
 
                crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
-               get_domains = encoder->get_power_domains(encoder, crtc_state);
-               for_each_power_domain(domain, get_domains)
-                       intel_display_power_get(dev_priv, domain);
+               encoder->get_power_domains(encoder, crtc_state);
        }
 }
 
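The readout refactor above inverts the hook's contract: instead of every encoder returning a u64 domain mask for this caller to walk with for_each_power_domain(), the encoder's ->get_power_domains() now takes the references itself, which also lets DSI record the wakerefs it gets back. In sketch form (fragments, not complete functions):

/* Before: hook reports domains, caller takes the references. */
u64 mask = encoder->get_power_domains(encoder, crtc_state);
enum intel_display_power_domain domain;

for_each_power_domain(domain, mask)
	intel_display_power_get(dev_priv, domain);

/* After: hook acquires (and can record) the references directly. */
encoder->get_power_domains(encoder, crtc_state);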
index cf709835fb9a9eece3c0761c21c53c34a25b7e22..8891f29a8c7fffacad25f29e718376aa164261f7 100644 (file)
@@ -1859,42 +1859,6 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
        return -EINVAL;
 }
 
-/* Optimize link config in order: max bpp, min lanes, min clock */
-static int
-intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
-                                 struct intel_crtc_state *pipe_config,
-                                 const struct link_config_limits *limits)
-{
-       struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
-       int bpp, clock, lane_count;
-       int mode_rate, link_clock, link_avail;
-
-       for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
-               mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
-                                                  bpp);
-
-               for (lane_count = limits->min_lane_count;
-                    lane_count <= limits->max_lane_count;
-                    lane_count <<= 1) {
-                       for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
-                               link_clock = intel_dp->common_rates[clock];
-                               link_avail = intel_dp_max_data_rate(link_clock,
-                                                                   lane_count);
-
-                               if (mode_rate <= link_avail) {
-                                       pipe_config->lane_count = lane_count;
-                                       pipe_config->pipe_bpp = bpp;
-                                       pipe_config->port_clock = link_clock;
-
-                                       return 0;
-                               }
-                       }
-               }
-       }
-
-       return -EINVAL;
-}
-
 static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
 {
        int i, num_bpc;
@@ -2031,15 +1995,13 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
        limits.min_bpp = 6 * 3;
        limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
 
-       if (intel_dp_is_edp(intel_dp) && intel_dp->edp_dpcd[0] < DP_EDP_14) {
+       if (intel_dp_is_edp(intel_dp)) {
                /*
                 * Use the maximum clock and number of lanes the eDP panel
-                * advertizes being capable of. The eDP 1.3 and earlier panels
-                * are generally designed to support only a single clock and
-                * lane configuration, and typically these values correspond to
-                * the native resolution of the panel. With eDP 1.4 rate select
-                * and DSC, this is decreasingly the case, and we need to be
-                * able to select less than maximum link config.
+                * advertises being capable of. The panels are generally
+                * designed to support only a single clock and lane
+                * configuration, and typically these values correspond to the
+                * native resolution of the panel.
                 */
                limits.min_lane_count = limits.max_lane_count;
                limits.min_clock = limits.max_clock;
@@ -2053,22 +2015,11 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
                      intel_dp->common_rates[limits.max_clock],
                      limits.max_bpp, adjusted_mode->crtc_clock);
 
-       if (intel_dp_is_edp(intel_dp))
-               /*
-                * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4
-                * section A.1: "It is recommended that the minimum number of
-                * lanes be used, using the minimum link rate allowed for that
-                * lane configuration."
-                *
-                * Note that we use the max clock and lane count for eDP 1.3 and
-                * earlier, and fast vs. wide is irrelevant.
-                */
-               ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config,
-                                                       &limits);
-       else
-               /* Optimize for slow and wide. */
-               ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config,
-                                                       &limits);
+       /*
+        * Optimize for slow and wide. This is the place to add an
+        * alternative optimization policy.
+        */
+       ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
 
        /* enable compression if the mode doesn't fit available BW */
        DRM_DEBUG_KMS("Force DSC en = %d\n", intel_dp->force_dsc_en);
index 15db41394b9ed75d9de8545c8a0faff9efbc9a9b..d5660ac1b0d60999d8788710a0140ec2f8d38570 100644 (file)
@@ -270,10 +270,12 @@ struct intel_encoder {
         * be set correctly before calling this function. */
        void (*get_config)(struct intel_encoder *,
                           struct intel_crtc_state *pipe_config);
-       /* Returns a mask of power domains that need to be referenced as part
-        * of the hardware state readout code. */
-       u64 (*get_power_domains)(struct intel_encoder *encoder,
-                                struct intel_crtc_state *crtc_state);
+       /*
+        * Acquires the power domains needed for an active encoder during
+        * hardware state readout.
+        */
+       void (*get_power_domains)(struct intel_encoder *encoder,
+                                 struct intel_crtc_state *crtc_state);
        /*
         * Called during system suspend after all pending requests for the
         * encoder are flushed (for example for DP AUX transactions) and
index 32dce7176f6381dc2a0429691dccc2eafc7fe360..b9b0ea4e2404d6cfce2c37be5d331591fb88fe6e 100644 (file)
@@ -455,7 +455,7 @@ static int igt_evict_contexts(void *arg)
                        struct i915_gem_context *ctx;
 
                        ctx = live_context(i915, file);
-                       if (!ctx)
+                       if (IS_ERR(ctx))
                                break;
 
                        /* We will need some GGTT space for the rq's context */
index 6403728fe7784f54977b0c318d790ea886553a04..31c93c3ccd00ffa62c3158d159d7cc4afd8f9ae5 100644 (file)
@@ -256,6 +256,28 @@ static void band_gap_reset(struct drm_i915_private *dev_priv)
        mutex_unlock(&dev_priv->sb_lock);
 }
 
+static int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       u32 tmp;
+
+       tmp = I915_READ(PIPEMISC(crtc->pipe));
+
+       switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
+       case PIPEMISC_DITHER_6_BPC:
+               return 18;
+       case PIPEMISC_DITHER_8_BPC:
+               return 24;
+       case PIPEMISC_DITHER_10_BPC:
+               return 30;
+       case PIPEMISC_DITHER_12_BPC:
+               return 36;
+       default:
+               MISSING_CASE(tmp);
+               return 0;
+       }
+}
+
 static int intel_dsi_compute_config(struct intel_encoder *encoder,
                                    struct intel_crtc_state *pipe_config,
                                    struct drm_connector_state *conn_state)
@@ -1071,6 +1093,8 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
        bpp = mipi_dsi_pixel_format_to_bpp(
                        pixel_format_from_register_bits(fmt));
 
+       pipe_config->pipe_bpp = bdw_get_pipemisc_bpp(crtc);
+
        /* Enable frame timestamp based scanline reporting */
        adjusted_mode->private_flags |=
                        I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP;
index 22e68a100e7beeaf752efde0235dddc7e13b9683..5d333138f9136b6e8b98238a9851085dd669c236 100644 (file)
@@ -662,13 +662,11 @@ static unsigned int mt8173_calculate_factor(int clock)
 static unsigned int mt2701_calculate_factor(int clock)
 {
        if (clock <= 64000)
-               return 16;
-       else if (clock <= 128000)
-               return 8;
-       else if (clock <= 256000)
                return 4;
-       else
+       else if (clock <= 128000)
                return 2;
+       else
+               return 1;
 }
 
 static const struct mtk_dpi_conf mt8173_conf = {
index cf59ea9bccfdf659ca042df67cf437a5da4a1078..57ce4708ef1b9a420cd9862d3c7b7d624a7efa3f 100644 (file)
@@ -15,6 +15,7 @@
 #include <drm/drmP.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_fb_helper.h>
 #include <drm/drm_gem.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_of.h>
@@ -341,6 +342,8 @@ static struct drm_driver mtk_drm_driver = {
        .gem_prime_get_sg_table = mtk_gem_prime_get_sg_table,
        .gem_prime_import_sg_table = mtk_gem_prime_import_sg_table,
        .gem_prime_mmap = mtk_drm_gem_mmap_buf,
+       .gem_prime_vmap = mtk_drm_gem_prime_vmap,
+       .gem_prime_vunmap = mtk_drm_gem_prime_vunmap,
        .fops = &mtk_drm_fops,
 
        .name = DRIVER_NAME,
@@ -376,6 +379,10 @@ static int mtk_drm_bind(struct device *dev)
        if (ret < 0)
                goto err_deinit;
 
+       ret = drm_fbdev_generic_setup(drm, 32);
+       if (ret)
+               DRM_ERROR("Failed to initialize fbdev: %d\n", ret);
+
        return 0;
 
 err_deinit:
index 259b7b0de1d22d7beb052c19e2ef06e4859afcff..38483e9ee071223228e3b9bc493dc27799390411 100644 (file)
@@ -241,3 +241,49 @@ struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
        kfree(mtk_gem);
        return ERR_PTR(ret);
 }
+
+void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj)
+{
+       struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
+       struct sg_table *sgt;
+       struct sg_page_iter iter;
+       unsigned int npages;
+       unsigned int i = 0;
+
+       if (mtk_gem->kvaddr)
+               return mtk_gem->kvaddr;
+
+       sgt = mtk_gem_prime_get_sg_table(obj);
+       if (IS_ERR(sgt))
+               return NULL;
+
+       npages = obj->size >> PAGE_SHIFT;
+       mtk_gem->pages = kcalloc(npages, sizeof(*mtk_gem->pages), GFP_KERNEL);
+       if (!mtk_gem->pages)
+               goto out;
+
+       for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
+               mtk_gem->pages[i++] = sg_page_iter_page(&iter);
+               if (i > npages)
+                       break;
+       }
+       mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP,
+                              pgprot_writecombine(PAGE_KERNEL));
+
+out:
+       kfree((void *)sgt);
+
+       return mtk_gem->kvaddr;
+}
+
+void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+       struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
+
+       if (!mtk_gem->pages)
+               return;
+
+       vunmap(vaddr);
+       mtk_gem->kvaddr = 0;
+       kfree((void *)mtk_gem->pages);
+}
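The new vmap helper above walks the object's scatterlist page by page, collects struct page pointers, and maps them write-combined into one contiguous kernel range. A self-contained sketch of the core pattern; demo_vmap_sgt() is hypothetical, and note that vmap() copies the page pointers, so the temporary array can be freed immediately:

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *demo_vmap_sgt(struct sg_table *sgt, unsigned int npages)
{
	struct sg_page_iter iter;
	struct page **pages;
	unsigned int i = 0;
	void *vaddr;

	pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* Iterate in PAGE_SIZE steps, not per (possibly multi-page)
	 * scatterlist segment. */
	for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
		pages[i++] = sg_page_iter_page(&iter);
		if (i >= npages)
			break;
	}

	vaddr = vmap(pages, npages, VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	kfree(pages);
	return vaddr;
}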
index 534639b43a1c77c24a5942c6cb627dc32389f425..c047a7ef294fd0391ef01cf15071f2d8b3e98f3f 100644 (file)
@@ -37,6 +37,7 @@ struct mtk_drm_gem_obj {
        dma_addr_t              dma_addr;
        unsigned long           dma_attrs;
        struct sg_table         *sg;
+       struct page             **pages;
 };
 
 #define to_mtk_gem_obj(x)      container_of(x, struct mtk_drm_gem_obj, base)
@@ -52,5 +53,7 @@ int mtk_drm_gem_mmap_buf(struct drm_gem_object *obj,
 struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj);
 struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
                        struct dma_buf_attachment *attach, struct sg_table *sg);
+void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj);
+void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
 
 #endif
index 915cc84621aeaf62516681a87d56e0e9760197d4..e04e6c293d39d189e87cb9bc1d25e4e840750857 100644 (file)
@@ -1480,7 +1480,6 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
        if (IS_ERR(regmap))
                ret = PTR_ERR(regmap);
        if (ret) {
-               ret = PTR_ERR(regmap);
                dev_err(dev,
                        "Failed to get system configuration registers: %d\n",
                        ret);
@@ -1516,6 +1515,7 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
        of_node_put(remote);
 
        hdmi->ddc_adpt = of_find_i2c_adapter_by_node(i2c_np);
+       of_node_put(i2c_np);
        if (!hdmi->ddc_adpt) {
                dev_err(dev, "Failed to get ddc i2c adapter by node\n");
                return -EINVAL;
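The of_node_put() added above closes a device-node refcount leak: of_* lookup helpers return nodes with an elevated refcount that must be dropped once the node has served its purpose, on success and failure paths alike. A hedged sketch of the pattern; demo_get_ddc() and the "ddc-i2c-bus" property are illustrative:

#include <linux/i2c.h>
#include <linux/of.h>

static struct i2c_adapter *demo_get_ddc(struct device *dev)
{
	struct device_node *np;
	struct i2c_adapter *adap;

	np = of_parse_phandle(dev->of_node, "ddc-i2c-bus", 0);
	if (!np)
		return NULL;

	adap = of_find_i2c_adapter_by_node(np);
	of_node_put(np);	/* drop the lookup ref; adap holds its own */
	return adap;
}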
index 4ef9c57ffd44d4eb6db90dd8cecc0ed02de81295..5223498502c49228839fb993c4c2abd7ccc84a89 100644 (file)
@@ -15,28 +15,6 @@ static const struct phy_ops mtk_hdmi_phy_dev_ops = {
        .owner = THIS_MODULE,
 };
 
-long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
-                            unsigned long *parent_rate)
-{
-       struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
-
-       hdmi_phy->pll_rate = rate;
-       if (rate <= 74250000)
-               *parent_rate = rate;
-       else
-               *parent_rate = rate / 2;
-
-       return rate;
-}
-
-unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw,
-                                      unsigned long parent_rate)
-{
-       struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
-
-       return hdmi_phy->pll_rate;
-}
-
 void mtk_hdmi_phy_clear_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
                             u32 bits)
 {
@@ -110,13 +88,11 @@ mtk_hdmi_phy_dev_get_ops(const struct mtk_hdmi_phy *hdmi_phy)
                return NULL;
 }
 
-static void mtk_hdmi_phy_clk_get_ops(struct mtk_hdmi_phy *hdmi_phy,
-                                    const struct clk_ops **ops)
+static void mtk_hdmi_phy_clk_get_data(struct mtk_hdmi_phy *hdmi_phy,
+                                     struct clk_init_data *clk_init)
 {
-       if (hdmi_phy && hdmi_phy->conf && hdmi_phy->conf->hdmi_phy_clk_ops)
-               *ops = hdmi_phy->conf->hdmi_phy_clk_ops;
-       else
-               dev_err(hdmi_phy->dev, "Failed to get clk ops of phy\n");
+       clk_init->flags = hdmi_phy->conf->flags;
+       clk_init->ops = hdmi_phy->conf->hdmi_phy_clk_ops;
 }
 
 static int mtk_hdmi_phy_probe(struct platform_device *pdev)
@@ -129,7 +105,6 @@ static int mtk_hdmi_phy_probe(struct platform_device *pdev)
        struct clk_init_data clk_init = {
                .num_parents = 1,
                .parent_names = (const char * const *)&ref_clk_name,
-               .flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE,
        };
 
        struct phy *phy;
@@ -167,7 +142,7 @@ static int mtk_hdmi_phy_probe(struct platform_device *pdev)
        hdmi_phy->dev = dev;
        hdmi_phy->conf =
                (struct mtk_hdmi_phy_conf *)of_device_get_match_data(dev);
-       mtk_hdmi_phy_clk_get_ops(hdmi_phy, &clk_init.ops);
+       mtk_hdmi_phy_clk_get_data(hdmi_phy, &clk_init);
        hdmi_phy->pll_hw.init = &clk_init;
        hdmi_phy->pll = devm_clk_register(dev, &hdmi_phy->pll_hw);
        if (IS_ERR(hdmi_phy->pll)) {
index f39b1fc66612944c9b76b8b20f87ef561a0e595b..2d8b3182470dc465b29111e00ee2c9229e4f7c0d 100644 (file)
@@ -21,6 +21,7 @@ struct mtk_hdmi_phy;
 
 struct mtk_hdmi_phy_conf {
        bool tz_disabled;
+       unsigned long flags;
        const struct clk_ops *hdmi_phy_clk_ops;
        void (*hdmi_phy_enable_tmds)(struct mtk_hdmi_phy *hdmi_phy);
        void (*hdmi_phy_disable_tmds)(struct mtk_hdmi_phy *hdmi_phy);
@@ -48,10 +49,6 @@ void mtk_hdmi_phy_set_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
 void mtk_hdmi_phy_mask(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
                       u32 val, u32 mask);
 struct mtk_hdmi_phy *to_mtk_hdmi_phy(struct clk_hw *hw);
-long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
-                            unsigned long *parent_rate);
-unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw,
-                                      unsigned long parent_rate);
 
 extern struct platform_driver mtk_hdmi_phy_driver;
 extern struct mtk_hdmi_phy_conf mtk_hdmi_phy_8173_conf;
index fcc42dc6ea7fb81d1005239a52f81bc020b2f57a..d3cc4022e98844601b82928020ec7cf0cfaf1004 100644 (file)
@@ -79,7 +79,6 @@ static int mtk_hdmi_pll_prepare(struct clk_hw *hw)
        mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
        usleep_range(80, 100);
        mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
-       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV);
        mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
        mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
        mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
@@ -94,7 +93,6 @@ static void mtk_hdmi_pll_unprepare(struct clk_hw *hw)
        mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
        mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
        mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
-       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV);
        mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
        usleep_range(80, 100);
        mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
@@ -108,6 +106,12 @@ static void mtk_hdmi_pll_unprepare(struct clk_hw *hw)
        usleep_range(80, 100);
 }
 
+static long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+                                   unsigned long *parent_rate)
+{
+       return rate;
+}
+
 static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
                                 unsigned long parent_rate)
 {
@@ -116,13 +120,14 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
 
        if (rate <= 64000000)
                pos_div = 3;
-       else if (rate <= 12800000)
-               pos_div = 1;
+       else if (rate <= 128000000)
+               pos_div = 2;
        else
                pos_div = 1;
 
        mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_PREDIV_MASK);
        mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_POSDIV_MASK);
+       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV);
        mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x1 << RG_HTPLL_IC),
                          RG_HTPLL_IC_MASK);
        mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x1 << RG_HTPLL_IR),
@@ -154,6 +159,39 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
        return 0;
 }
 
+static unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw,
+                                             unsigned long parent_rate)
+{
+       struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+       unsigned long out_rate, val;
+
+       val = (readl(hdmi_phy->regs + HDMI_CON6)
+              & RG_HTPLL_PREDIV_MASK) >> RG_HTPLL_PREDIV;
+       switch (val) {
+       case 0x00:
+               out_rate = parent_rate;
+               break;
+       case 0x01:
+               out_rate = parent_rate / 2;
+               break;
+       default:
+               out_rate = parent_rate / 4;
+               break;
+       }
+
+       val = (readl(hdmi_phy->regs + HDMI_CON6)
+              & RG_HTPLL_FBKDIV_MASK) >> RG_HTPLL_FBKDIV;
+       out_rate *= (val + 1) * 2;
+       val = (readl(hdmi_phy->regs + HDMI_CON2)
+              & RG_HDMITX_TX_POSDIV_MASK);
+       out_rate >>= (val >> RG_HDMITX_TX_POSDIV);
+
+       if (readl(hdmi_phy->regs + HDMI_CON2) & RG_HDMITX_EN_TX_POSDIV)
+               out_rate /= 5;
+
+       return out_rate;
+}
+
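The new recalc callback derives the PLL rate from live register state instead of a cached value. An illustrative walk-through with hypothetical field values:

/* Illustrative only -- the register values below are made up:
 *   prediv = 0x0        out = parent rate            (e.g. 26 MHz)
 *   fbkdiv = 29         out *= (29 + 1) * 2      ->  1560 MHz
 *   posdiv = 1          out >>= 1                ->   780 MHz
 *   EN_TX_POSDIV set    out /= 5                 ->   156 MHz
 */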
 static const struct clk_ops mtk_hdmi_phy_pll_ops = {
        .prepare = mtk_hdmi_pll_prepare,
        .unprepare = mtk_hdmi_pll_unprepare,
@@ -174,7 +212,6 @@ static void mtk_hdmi_phy_enable_tmds(struct mtk_hdmi_phy *hdmi_phy)
        mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
        usleep_range(80, 100);
        mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
-       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV);
        mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
        mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
        mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
@@ -186,7 +223,6 @@ static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy)
        mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
        mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
        mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
-       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV);
        mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
        usleep_range(80, 100);
        mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
@@ -202,6 +238,7 @@ static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy)
 
 struct mtk_hdmi_phy_conf mtk_hdmi_phy_2701_conf = {
        .tz_disabled = true,
+       .flags = CLK_SET_RATE_GATE,
        .hdmi_phy_clk_ops = &mtk_hdmi_phy_pll_ops,
        .hdmi_phy_enable_tmds = mtk_hdmi_phy_enable_tmds,
        .hdmi_phy_disable_tmds = mtk_hdmi_phy_disable_tmds,
index ed5916b2765843b09056f0788ce93d190e5a0367..47f8a295168224b525959bafb8f0e631ee799b2b 100644 (file)
@@ -199,6 +199,20 @@ static void mtk_hdmi_pll_unprepare(struct clk_hw *hw)
        usleep_range(100, 150);
 }
 
+static long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+                                   unsigned long *parent_rate)
+{
+       struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+
+       hdmi_phy->pll_rate = rate;
+       if (rate <= 74250000)
+               *parent_rate = rate;
+       else
+               *parent_rate = rate / 2;
+
+       return rate;
+}
+
 static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
                                 unsigned long parent_rate)
 {
@@ -285,6 +299,14 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
        return 0;
 }
 
+static unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw,
+                                             unsigned long parent_rate)
+{
+       struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+
+       return hdmi_phy->pll_rate;
+}
+
 static const struct clk_ops mtk_hdmi_phy_pll_ops = {
        .prepare = mtk_hdmi_pll_prepare,
        .unprepare = mtk_hdmi_pll_unprepare,
@@ -309,6 +331,7 @@ static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy)
 }
 
 struct mtk_hdmi_phy_conf mtk_hdmi_phy_8173_conf = {
+       .flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE,
        .hdmi_phy_clk_ops = &mtk_hdmi_phy_pll_ops,
        .hdmi_phy_enable_tmds = mtk_hdmi_phy_enable_tmds,
        .hdmi_phy_disable_tmds = mtk_hdmi_phy_disable_tmds,
index 2281ed3eb7747757620288069f32d48a53b9ea15..8a4ebcb6405cee2427d0889ea49a0d871d2cc5ba 100644 (file)
@@ -337,12 +337,14 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
 
        ret = drm_dev_register(drm, 0);
        if (ret)
-               goto free_drm;
+               goto uninstall_irq;
 
        drm_fbdev_generic_setup(drm, 32);
 
        return 0;
 
+uninstall_irq:
+       drm_irq_uninstall(drm);
 free_drm:
        drm_dev_put(drm);
 
@@ -356,8 +358,8 @@ static int meson_drv_bind(struct device *dev)
 
 static void meson_drv_unbind(struct device *dev)
 {
-       struct drm_device *drm = dev_get_drvdata(dev);
-       struct meson_drm *priv = drm->dev_private;
+       struct meson_drm *priv = dev_get_drvdata(dev);
+       struct drm_device *drm = priv->drm;
 
        if (priv->canvas) {
                meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
@@ -367,6 +369,7 @@ static void meson_drv_unbind(struct device *dev)
        }
 
        drm_dev_unregister(drm);
+       drm_irq_uninstall(drm);
        drm_kms_helper_poll_fini(drm);
        drm_mode_config_cleanup(drm);
        drm_dev_put(drm);
index e28814f4ea6cd2e05724ee46a0892b261d3d4cef..563953ec6ad03fd904c2e5c38de8cbe1dc2edce0 100644 (file)
@@ -569,7 +569,8 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
        DRM_DEBUG_DRIVER("Modeline " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
 
        /* If the mode clock exceeds the sink's max TMDS clock, reject the mode */
-       if (mode->clock > connector->display_info.max_tmds_clock)
+       if (connector->display_info.max_tmds_clock &&
+           mode->clock > connector->display_info.max_tmds_clock)
                return MODE_BAD;
 
        /* Check against non-VIC supported modes */
index 340383150fb98d24567b6ca74cae0298cda806b6..ebf9c96d43eee56649e510a5ca8c53a045b10c67 100644 (file)
@@ -175,6 +175,7 @@ static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
                REG_FLD_MOD(core->base, HDMI_CORE_SYS_INTR_UNMASK4, 0, 3, 3);
                hdmi_wp_clear_irqenable(core->wp, HDMI_IRQ_CORE);
                hdmi_wp_set_irqstatus(core->wp, HDMI_IRQ_CORE);
+               REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0, 5, 0);
                hdmi4_core_disable(core);
                return 0;
        }
@@ -182,16 +183,24 @@ static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
        if (err)
                return err;
 
+       /*
+        * Initialize the CEC clock divider: CEC needs a 2 MHz clock, hence
+        * set the divider to 24 to get 48 MHz / 24 = 2 MHz
+        */
+       REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0x18, 5, 0);
+
        /* Clear TX FIFO */
        if (!hdmi_cec_clear_tx_fifo(adap)) {
                pr_err("cec-%s: could not clear TX FIFO\n", adap->name);
-               return -EIO;
+               err = -EIO;
+               goto err_disable_clk;
        }
 
        /* Clear RX FIFO */
        if (!hdmi_cec_clear_rx_fifo(adap)) {
                pr_err("cec-%s: could not clear RX FIFO\n", adap->name);
-               return -EIO;
+               err = -EIO;
+               goto err_disable_clk;
        }
 
        /* Clear CEC interrupts */
@@ -236,6 +245,12 @@ static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
                hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_1, temp);
        }
        return 0;
+
+err_disable_clk:
+       REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0, 5, 0);
+       hdmi4_core_disable(core);
+
+       return err;
 }
 
 static int hdmi_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
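The enable path above now unwinds with goto labels: a late FIFO failure first clears the clock divider it programmed (0x18, i.e. 48 MHz / 24 = 2 MHz for CEC) and powers the core back down. The shape of that pattern, with every demo_* name hypothetical:

static int demo_enable(struct demo_core *core)
{
	int err;

	err = demo_power_on(core);		/* step 1 */
	if (err)
		return err;

	demo_set_clk_divider(core, 0x18);	/* step 2: 48 MHz / 24 = 2 MHz */

	err = demo_clear_fifos(core);		/* step 3 may fail */
	if (err)
		goto err_disable_clk;

	return 0;

err_disable_clk:
	/* Undo the earlier steps in reverse order. */
	demo_set_clk_divider(core, 0);
	demo_power_off(core);
	return err;
}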
@@ -333,11 +348,8 @@ int hdmi4_cec_init(struct platform_device *pdev, struct hdmi_core_data *core,
                return ret;
        core->wp = wp;
 
-       /*
-        * Initialize CEC clock divider: CEC needs 2MHz clock hence
-        * set the devider to 24 to get 48/24=2MHz clock
-        */
-       REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0x18, 5, 0);
+       /* Disable clock initially, hdmi_cec_adap_enable() manages it */
+       REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0, 5, 0);
 
        ret = cec_register_adapter(core->adap, &pdev->dev);
        if (ret < 0) {
index 813ba42f27539ce94b85afc20295e411f6b4c123..e384b95ad8573a7ef9ad9bb9a631432eda4a726f 100644 (file)
@@ -708,7 +708,7 @@ int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
        else
                acore.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
        /*
-        * The I2S input word length is twice the lenght given in the IEC-60958
+        * The I2S input word length is twice the length given in the IEC-60958
         * status word. If the word size is greater than
         * 20 bits, increment by one.
         */
index c7d4c6073ea59b70c56559288def3fb7fd6fe215..0d4ade9d4722c340b706b82d7ea7bb587db5f293 100644 (file)
@@ -541,6 +541,18 @@ static void vop_core_clks_disable(struct vop *vop)
        clk_disable(vop->hclk);
 }
 
+static void vop_win_disable(struct vop *vop, const struct vop_win_data *win)
+{
+       if (win->phy->scl && win->phy->scl->ext) {
+               VOP_SCL_SET_EXT(vop, win, yrgb_hor_scl_mode, SCALE_NONE);
+               VOP_SCL_SET_EXT(vop, win, yrgb_ver_scl_mode, SCALE_NONE);
+               VOP_SCL_SET_EXT(vop, win, cbcr_hor_scl_mode, SCALE_NONE);
+               VOP_SCL_SET_EXT(vop, win, cbcr_ver_scl_mode, SCALE_NONE);
+       }
+
+       VOP_WIN_SET(vop, win, enable, 0);
+}
+
 static int vop_enable(struct drm_crtc *crtc)
 {
        struct vop *vop = to_vop(crtc);
@@ -586,7 +598,7 @@ static int vop_enable(struct drm_crtc *crtc)
                struct vop_win *vop_win = &vop->win[i];
                const struct vop_win_data *win = vop_win->data;
 
-               VOP_WIN_SET(vop, win, enable, 0);
+               vop_win_disable(vop, win);
        }
        spin_unlock(&vop->reg_lock);
 
@@ -735,7 +747,7 @@ static void vop_plane_atomic_disable(struct drm_plane *plane,
 
        spin_lock(&vop->reg_lock);
 
-       VOP_WIN_SET(vop, win, enable, 0);
+       vop_win_disable(vop, win);
 
        spin_unlock(&vop->reg_lock);
 }
@@ -1622,7 +1634,7 @@ static int vop_initial(struct vop *vop)
                int channel = i * 2 + 1;
 
                VOP_WIN_SET(vop, win, channel, (channel + 1) << 4 | channel);
-               VOP_WIN_SET(vop, win, enable, 0);
+               vop_win_disable(vop, win);
                VOP_WIN_SET(vop, win, gate, 1);
        }
 
index dc47720c99ba5689a8c12f217c7859960bfa394d..39d8509d96a0d3162f8bd7c310f4aa0a18898f6b 100644 (file)
@@ -48,8 +48,13 @@ static enum drm_mode_status
 sun8i_dw_hdmi_mode_valid_h6(struct drm_connector *connector,
                            const struct drm_display_mode *mode)
 {
-       /* This is max for HDMI 2.0b (4K@60Hz) */
-       if (mode->clock > 594000)
+       /*
+        * The controller supports a maximum of 594 MHz, which corresponds
+        * to 4K@60Hz 4:4:4 or RGB. However, for frequencies greater than
+        * 340 MHz, scrambling has to be enabled. Because scrambling is
+        * not yet implemented, just limit to 340 MHz for now.
+        */
+       if (mode->clock > 340000)
                return MODE_CLOCK_HIGH;
 
        return MODE_OK;
index fc36e0c10a374a2a33a054a4102944dfb0b03009..b1e7c76e9c17269664fddd5ab5c90c3477b80a0c 100644 (file)
@@ -227,7 +227,7 @@ static int sun8i_tcon_top_bind(struct device *dev, struct device *master,
 
 err_unregister_gates:
        for (i = 0; i < CLK_NUM; i++)
-               if (clk_data->hws[i])
+               if (!IS_ERR_OR_NULL(clk_data->hws[i]))
                        clk_hw_unregister_gate(clk_data->hws[i]);
        clk_disable_unprepare(tcon_top->bus);
 err_assert_reset:
@@ -245,7 +245,8 @@ static void sun8i_tcon_top_unbind(struct device *dev, struct device *master,
 
        of_clk_del_provider(dev->of_node);
        for (i = 0; i < CLK_NUM; i++)
-               clk_hw_unregister_gate(clk_data->hws[i]);
+               if (clk_data->hws[i])
+                       clk_hw_unregister_gate(clk_data->hws[i]);
 
        clk_disable_unprepare(tcon_top->bus);
        reset_control_assert(tcon_top->rst);
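The two hunks above make teardown match what was actually built: on the bind error path an hws[] slot may hold NULL (gate never reached) or an ERR_PTR (registration failed), so only IS_ERR_OR_NULL-clean entries are unregistered, while unbind needs just a NULL check since every registered gate is valid by then. In sketch form:

/* Error-path cleanup of a partially built clk_hw array (sketch). */
for (i = 0; i < CLK_NUM; i++)
	if (!IS_ERR_OR_NULL(clk_data->hws[i]))
		clk_hw_unregister_gate(clk_data->hws[i]);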
index ba9b3cfb8c3d247fae80f8026cc520936e5b954c..b3436c2aed6892b585ca221a9ac711027350310e 100644 (file)
@@ -378,14 +378,16 @@ static int tegra_shared_plane_atomic_check(struct drm_plane *plane,
 static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
                                              struct drm_plane_state *old_state)
 {
-       struct tegra_dc *dc = to_tegra_dc(old_state->crtc);
        struct tegra_plane *p = to_tegra_plane(plane);
+       struct tegra_dc *dc;
        u32 value;
 
        /* rien ne va plus -- no more bets */
        if (!old_state || !old_state->crtc)
                return;
 
+       dc = to_tegra_dc(old_state->crtc);
+
        /*
         * XXX Legacy helpers seem to sometimes call ->atomic_disable() even
         * on planes that are already disabled. Make sure we fallback to the
index 39bfed9623de28f0e62a0297f8e84b7151c28238..982ce37ecde1b0c9fc6ef07c9819b98541248151 100644 (file)
@@ -106,6 +106,7 @@ static int vic_boot(struct vic *vic)
        if (vic->booted)
                return 0;
 
+#ifdef CONFIG_IOMMU_API
        if (vic->config->supports_sid) {
                struct iommu_fwspec *spec = dev_iommu_fwspec_get(vic->dev);
                u32 value;
@@ -121,6 +122,7 @@ static int vic_boot(struct vic *vic)
                        vic_writel(vic, value, VIC_THI_STREAMID1);
                }
        }
+#endif
 
        /* setup clockgating registers */
        vic_writel(vic, CG_IDLE_CG_DLY_CNT(4) |
index 66885c24590f0147ce1a510991a546c4f2bbe427..c1bd5e3d9e4aee80bb185cc38307fb389fe54c2f 100644 (file)
 #include "udl_connector.h"
 #include "udl_drv.h"
 
-static bool udl_get_edid_block(struct udl_device *udl, int block_idx,
-                                                          u8 *buff)
+static int udl_get_edid_block(void *data, u8 *buf, unsigned int block,
+                              size_t len)
 {
        int ret, i;
        u8 *read_buff;
+       struct udl_device *udl = data;
 
        read_buff = kmalloc(2, GFP_KERNEL);
        if (!read_buff)
-               return false;
+               return -1;
 
-       for (i = 0; i < EDID_LENGTH; i++) {
-               int bval = (i + block_idx * EDID_LENGTH) << 8;
+       for (i = 0; i < len; i++) {
+               int bval = (i + block * EDID_LENGTH) << 8;
                ret = usb_control_msg(udl->udev,
                                      usb_rcvctrlpipe(udl->udev, 0),
                                          (0x02), (0x80 | (0x02 << 5)), bval,
@@ -37,60 +38,13 @@ static bool udl_get_edid_block(struct udl_device *udl, int block_idx,
                if (ret < 1) {
                        DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret);
                        kfree(read_buff);
-                       return false;
+                       return -1;
                }
-               buff[i] = read_buff[1];
+               buf[i] = read_buff[1];
        }
 
        kfree(read_buff);
-       return true;
-}
-
-static bool udl_get_edid(struct udl_device *udl, u8 **result_buff,
-                        int *result_buff_size)
-{
-       int i, extensions;
-       u8 *block_buff = NULL, *buff_ptr;
-
-       block_buff = kmalloc(EDID_LENGTH, GFP_KERNEL);
-       if (block_buff == NULL)
-               return false;
-
-       if (udl_get_edid_block(udl, 0, block_buff) &&
-           memchr_inv(block_buff, 0, EDID_LENGTH)) {
-               extensions = ((struct edid *)block_buff)->extensions;
-               if (extensions > 0) {
-                       /* we have to read all extensions one by one */
-                       *result_buff_size = EDID_LENGTH * (extensions + 1);
-                       *result_buff = kmalloc(*result_buff_size, GFP_KERNEL);
-                       buff_ptr = *result_buff;
-                       if (buff_ptr == NULL) {
-                               kfree(block_buff);
-                               return false;
-                       }
-                       memcpy(buff_ptr, block_buff, EDID_LENGTH);
-                       kfree(block_buff);
-                       buff_ptr += EDID_LENGTH;
-                       for (i = 1; i < extensions; ++i) {
-                               if (udl_get_edid_block(udl, i, buff_ptr)) {
-                                       buff_ptr += EDID_LENGTH;
-                               } else {
-                                       kfree(*result_buff);
-                                       *result_buff = NULL;
-                                       return false;
-                               }
-                       }
-                       return true;
-               }
-               /* we have only base edid block */
-               *result_buff = block_buff;
-               *result_buff_size = EDID_LENGTH;
-               return true;
-       }
-
-       kfree(block_buff);
-
-       return false;
+       return 0;
 }
 
 static int udl_get_modes(struct drm_connector *connector)
@@ -122,8 +76,6 @@ static enum drm_mode_status udl_mode_valid(struct drm_connector *connector,
 static enum drm_connector_status
 udl_detect(struct drm_connector *connector, bool force)
 {
-       u8 *edid_buff = NULL;
-       int edid_buff_size = 0;
        struct udl_device *udl = connector->dev->dev_private;
        struct udl_drm_connector *udl_connector =
                                        container_of(connector,
@@ -136,12 +88,10 @@ udl_detect(struct drm_connector *connector, bool force)
                udl_connector->edid = NULL;
        }
 
-
-       if (!udl_get_edid(udl, &edid_buff, &edid_buff_size))
+       udl_connector->edid = drm_do_get_edid(connector, udl_get_edid_block, udl);
+       if (!udl_connector->edid)
                return connector_status_disconnected;
 
-       udl_connector->edid = (struct edid *)edid_buff;
-       
        return connector_status_connected;
 }
 
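The udl rewrite above drops the hand-rolled extension-block walker in favour of drm_do_get_edid(), which only needs a block-fetch callback: fill len bytes of EDID block 'block' into buf and return 0 on success, and the helper handles extension iteration and validation itself. A minimal sketch of the contract; the demo_* names and the read helper are hypothetical:

#include <drm/drm_edid.h>

static int demo_get_edid_block(void *data, u8 *buf, unsigned int block,
			       size_t len)
{
	struct demo_device *demo = data;	/* hypothetical device */

	/* Fetch 'len' bytes starting at this block's byte offset. */
	return demo_read_edid(demo, block * EDID_LENGTH, buf, len);
}

/* usage: edid = drm_do_get_edid(connector, demo_get_edid_block, demo); */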
index 22cd2d13e272f033d3e54b9245986ce22fa74486..ff47f890e6ad8d554fa7180aab449321a34ce5c1 100644 (file)
@@ -52,6 +52,7 @@ static struct drm_driver driver = {
        .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
        .load = udl_driver_load,
        .unload = udl_driver_unload,
+       .release = udl_driver_release,
 
        /* gem hooks */
        .gem_free_object_unlocked = udl_gem_free_object,
index e9e9b1ff678ee0a81d0d4b100b816b19122c7f0c..4ae67d882eae928e6b39fb4f240a46bfc272ed15 100644 (file)
@@ -104,6 +104,7 @@ void udl_urb_completion(struct urb *urb);
 
 int udl_driver_load(struct drm_device *dev, unsigned long flags);
 void udl_driver_unload(struct drm_device *dev);
+void udl_driver_release(struct drm_device *dev);
 
 int udl_fbdev_init(struct drm_device *dev);
 void udl_fbdev_cleanup(struct drm_device *dev);
index 9086d0d1b880de87de7609e55798e534b5cb3039..1f8ef34ade24365bce4f522104be8347eafb1586 100644 (file)
@@ -379,6 +379,12 @@ void udl_driver_unload(struct drm_device *dev)
                udl_free_urb_list(dev);
 
        udl_fbdev_cleanup(dev);
-       udl_modeset_cleanup(dev);
        kfree(udl);
 }
+
+void udl_driver_release(struct drm_device *dev)
+{
+       udl_modeset_cleanup(dev);
+       drm_dev_fini(dev);
+       kfree(dev);
+}
index 5930facd6d2d85cca81cb9c1f5247a6be3632546..11a8f99ba18c5f007734abef1003cc44d5e778a1 100644 (file)
@@ -191,13 +191,9 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
        ret = drm_gem_handle_create(file, &obj->base, handle);
        drm_gem_object_put_unlocked(&obj->base);
        if (ret)
-               goto err;
+               return ERR_PTR(ret);
 
        return &obj->base;
-
-err:
-       __vgem_gem_destroy(obj);
-       return ERR_PTR(ret);
 }
 
 static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
index 138b0bb325cf9662cd59b5a54158947dc691a2d9..69048e73377dc97855aa3b71491008e5993a5304 100644 (file)
@@ -111,11 +111,8 @@ struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
 
        ret = drm_gem_handle_create(file, &obj->gem, handle);
        drm_gem_object_put_unlocked(&obj->gem);
-       if (ret) {
-               drm_gem_object_release(&obj->gem);
-               kfree(obj);
+       if (ret)
                return ERR_PTR(ret);
-       }
 
        return &obj->gem;
 }
index 27101c04a8272668988ce5be66dfc584068f8a60..4030d64916f004a03781fc7c5b7a36173bcf344f 100644 (file)
@@ -114,7 +114,7 @@ static inline void synchronize_syncpt_base(struct host1x_job *job)
 
 static void host1x_channel_set_streamid(struct host1x_channel *channel)
 {
-#if HOST1X_HW >= 6
+#if IS_ENABLED(CONFIG_IOMMU_API) && HOST1X_HW >= 6
        struct iommu_fwspec *spec = dev_iommu_fwspec_get(channel->dev->parent);
        u32 sid = spec ? spec->ids[0] & 0xffff : 0x7f;
 
index 6ca8d322b487279348d90513caa10d6bb6745e40..4ca0cdfa6b33af35f951a3969cf9c02b67d7a1b0 100644 (file)
@@ -150,6 +150,7 @@ config HID_ASUS
        tristate "Asus"
        depends on LEDS_CLASS
        depends on ASUS_WMI || ASUS_WMI=n
+       select POWER_SUPPLY
        ---help---
        Support for Asus notebook built-in keyboard and touchpad via i2c, and
        the Asus Republic of Gamers laptop keyboard special keys.
index 9993b692598fb84d1700e26ef7f97856ff842955..860e21ec6a492a35392f2b47146f5e0a811c1068 100644 (file)
@@ -1301,10 +1301,10 @@ static u32 __extract(u8 *report, unsigned offset, int n)
 u32 hid_field_extract(const struct hid_device *hid, u8 *report,
                        unsigned offset, unsigned n)
 {
-       if (n > 32) {
-               hid_warn(hid, "hid_field_extract() called with n (%d) > 32! (%s)\n",
+       if (n > 256) {
+               hid_warn(hid, "hid_field_extract() called with n (%d) > 256! (%s)\n",
                         n, current->comm);
-               n = 32;
+               n = 256;
        }
 
        return __extract(report, offset, n);
index ac9fda1b5a7233c227cd5517dcf6331a29483a60..1384e57182af978e4329c9e946e3487f85229be5 100644 (file)
@@ -1060,10 +1060,15 @@ static int hid_debug_rdesc_show(struct seq_file *f, void *p)
        seq_printf(f, "\n\n");
 
        /* dump parsed data and input mappings */
+       if (down_interruptible(&hdev->driver_input_lock))
+               return 0;
+
        hid_dump_device(hdev, f);
        seq_printf(f, "\n");
        hid_dump_input_mapping(hdev, f);
 
+       up(&hdev->driver_input_lock);
+
        return 0;
 }
 
index b6d93f4ad037e440d1e5d23d76058e4be606159c..adce58f24f7638a70c170f17a694b5baa7f5a49a 100644 (file)
 #define USB_DEVICE_ID_SYNAPTICS_HD     0x0ac3
 #define USB_DEVICE_ID_SYNAPTICS_QUAD_HD        0x1ac3
 #define USB_DEVICE_ID_SYNAPTICS_TP_V103        0x5710
+#define I2C_DEVICE_ID_SYNAPTICS_7E7E   0x7e7e
 
 #define USB_VENDOR_ID_TEXAS_INSTRUMENTS        0x2047
 #define USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA    0x0855
index b10b1922c5bdf304a4f32100365da00f0f1572f4..1fce0076e7dc470e94cf561bf8c55b78cd92c6f2 100644 (file)
@@ -998,6 +998,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
                case 0x1b8: map_key_clear(KEY_VIDEO);           break;
                case 0x1bc: map_key_clear(KEY_MESSENGER);       break;
                case 0x1bd: map_key_clear(KEY_INFO);            break;
+               case 0x1cb: map_key_clear(KEY_ASSISTANT);       break;
                case 0x201: map_key_clear(KEY_NEW);             break;
                case 0x202: map_key_clear(KEY_OPEN);            break;
                case 0x203: map_key_clear(KEY_CLOSE);           break;
index 15ed6177a7a364d6b2634babe0df1be83b4cec7b..199cc256e9d9d3903016f64f66b36b909a9c1109 100644 (file)
@@ -2111,6 +2111,13 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
                kfree(data);
                return -ENOMEM;
        }
+       data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue");
+       if (!data->wq) {
+               kfree(data->effect_ids);
+               kfree(data);
+               return -ENOMEM;
+       }
+
        data->hidpp = hidpp;
        data->feature_index = feature_index;
        data->version = version;
@@ -2155,7 +2162,6 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
        /* ignore boost value at response.fap.params[2] */
 
        /* init the hardware command queue */
-       data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue");
        atomic_set(&data->workqueue_size, 0);
 
        /* initialize with zero autocenter to get wheel in usable state */
@@ -2608,8 +2614,9 @@ static int m560_raw_event(struct hid_device *hdev, u8 *data, int size)
                input_report_rel(mydata->input, REL_Y, v);
 
                v = hid_snto32(data[6], 8);
-               hidpp_scroll_counter_handle_scroll(
-                               &hidpp->vertical_wheel_counter, v);
+               if (v != 0)
+                       hidpp_scroll_counter_handle_scroll(
+                                       &hidpp->vertical_wheel_counter, v);
 
                input_sync(mydata->input);
        }
index 953908f2267c0653478cf88d53c7e85fdb121d76..77ffba48cc737e0df69892cfcceaafacb9815534 100644 (file)
@@ -715,7 +715,6 @@ static const struct hid_device_id hid_ignore_list[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_DEALEXTREAME, USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701) },
        { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) },
        { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) },
-       { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0400) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
@@ -855,7 +854,7 @@ static const struct hid_device_id hid_ignore_list[] = {
        { }
 };
 
-/**
+/*
  * hid_mouse_ignore_list - mouse devices which should not be handled by the hid layer
  *
  * There are composite devices for which we want to ignore only a certain
@@ -996,6 +995,10 @@ bool hid_ignore(struct hid_device *hdev)
                if (hdev->product == 0x0401 &&
                    strncmp(hdev->name, "ELAN0800", 8) != 0)
                        return true;
+               /* Same with product id 0x0400 */
+               if (hdev->product == 0x0400 &&
+                   strncmp(hdev->name, "QTEC0001", 8) != 0)
+                       return true;
                break;
        }
 
@@ -1042,7 +1045,7 @@ static struct hid_device_id *hid_exists_dquirk(const struct hid_device *hdev)
        }
 
        if (bl_entry != NULL)
-               dbg_hid("Found dynamic quirk 0x%lx for HID device 0x%hx:0x%hx\n",
+               dbg_hid("Found dynamic quirk 0x%lx for HID device 0x%04x:0x%04x\n",
                        bl_entry->driver_data, bl_entry->vendor,
                        bl_entry->product);
 
@@ -1209,7 +1212,7 @@ static unsigned long hid_gets_squirk(const struct hid_device *hdev)
                quirks |= bl_entry->driver_data;
 
        if (quirks)
-               dbg_hid("Found squirk 0x%lx for HID device 0x%hx:0x%hx\n",
+               dbg_hid("Found squirk 0x%lx for HID device 0x%04x:0x%04x\n",
                        quirks, hdev->vendor, hdev->product);
        return quirks;
 }
index 8141cadfca0e3c3ce62eccff1c28cd94031827b1..8dae0f9b819e011d6695462fea7e88e85cd16669 100644 (file)
@@ -499,6 +499,7 @@ static void steam_battery_unregister(struct steam_device *steam)
 static int steam_register(struct steam_device *steam)
 {
        int ret;
+       bool client_opened;
 
        /*
         * This function can be called several times in a row with the
@@ -511,9 +512,11 @@ static int steam_register(struct steam_device *steam)
                 * Unlikely, but getting the serial could fail, and it is not so
                 * important, so make up a serial number and go on.
                 */
+               mutex_lock(&steam->mutex);
                if (steam_get_serial(steam) < 0)
                        strlcpy(steam->serial_no, "XXXXXXXXXX",
                                        sizeof(steam->serial_no));
+               mutex_unlock(&steam->mutex);
 
                hid_info(steam->hdev, "Steam Controller '%s' connected",
                                steam->serial_no);
@@ -528,13 +531,15 @@ static int steam_register(struct steam_device *steam)
        }
 
        mutex_lock(&steam->mutex);
-       if (!steam->client_opened) {
+       client_opened = steam->client_opened;
+       if (!client_opened)
                steam_set_lizard_mode(steam, lizard_mode);
+       mutex_unlock(&steam->mutex);
+
+       if (!client_opened)
                ret = steam_input_register(steam);
-       } else {
+       else
                ret = 0;
-       }
-       mutex_unlock(&steam->mutex);
 
        return ret;
 }
@@ -630,14 +635,21 @@ static void steam_client_ll_close(struct hid_device *hdev)
 {
        struct steam_device *steam = hdev->driver_data;
 
+       unsigned long flags;
+       bool connected;
+
+       spin_lock_irqsave(&steam->lock, flags);
+       connected = steam->connected;
+       spin_unlock_irqrestore(&steam->lock, flags);
+
        mutex_lock(&steam->mutex);
        steam->client_opened = false;
+       if (connected)
+               steam_set_lizard_mode(steam, lizard_mode);
        mutex_unlock(&steam->mutex);
 
-       if (steam->connected) {
-               steam_set_lizard_mode(steam, lizard_mode);
+       if (connected)
                steam_input_register(steam);
-       }
 }
 
 static int steam_client_ll_raw_request(struct hid_device *hdev,
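
The steam_register() and steam_client_ll_close() hunks shrink the mutex scope: the shared flag is sampled under the lock and the heavyweight registration call runs after the lock is dropped. A rough pthread analog of that snapshot-then-act pattern (the helper names are hypothetical, not the driver's):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool client_opened;

static void set_mode(void) { puts("mode set"); }	/* like steam_set_lizard_mode() */
static int register_input(void) { puts("registered"); return 0; }	/* like steam_input_register() */

static int do_register(void)
{
	bool opened;

	pthread_mutex_lock(&lock);
	opened = client_opened;		/* snapshot shared state... */
	if (!opened)
		set_mode();		/* ...quick work stays under the lock... */
	pthread_mutex_unlock(&lock);

	return opened ? 0 : register_input();	/* ...heavy call runs outside it */
}

int main(void)
{
	return do_register();
}
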
index 7710d9f957da5b0dd07ca1444416de7cecd10529..0187c9f8fc22c5567e934cc0cc2089877963c56e 100644 (file)
@@ -735,10 +735,6 @@ static int uclogic_params_huion_init(struct uclogic_params *params,
                goto cleanup;
        }
        rc = usb_string(udev, 201, ver_ptr, ver_len);
-       if (ver_ptr == NULL) {
-               rc = -ENOMEM;
-               goto cleanup;
-       }
        if (rc == -EPIPE) {
                *ver_ptr = '\0';
        } else if (rc < 0) {
index 90164fed08d35eca2c34250c8b7cb3814ea99f53..4d1f24ee249c4455a4d5dfe18c7e6b0541311ad4 100644 (file)
@@ -184,6 +184,8 @@ static const struct i2c_hid_quirks {
                I2C_HID_QUIRK_NO_RUNTIME_PM },
        { USB_VENDOR_ID_ELAN, HID_ANY_ID,
                 I2C_HID_QUIRK_BOGUS_IRQ },
+       { USB_VENDOR_ID_SYNAPTICS, I2C_DEVICE_ID_SYNAPTICS_7E7E,
+               I2C_HID_QUIRK_NO_RUNTIME_PM },
        { 0, 0 }
 };
 
index 6f929bfa9fcd39380f7e9d9fc9729156e28e09f6..d0f1dfe2bcbbd652aa1daa682d2feac611dfa4da 100644 (file)
@@ -1759,6 +1759,7 @@ config SENSORS_VT8231
 config SENSORS_W83773G
        tristate "Nuvoton W83773G"
        depends on I2C
+       select REGMAP_I2C
        help
          If you say yes here you get support for the Nuvoton W83773G hardware
          monitoring chip.
index e4f9f7ce92fabc7c5f10aebaf5d69d350da565d2..f9abeeeead9e966dd8c8df7d225b81788eb67e40 100644 (file)
@@ -640,7 +640,7 @@ static const struct hwmon_channel_info ntc_chip = {
 };
 
 static const u32 ntc_temp_config[] = {
-       HWMON_T_INPUT, HWMON_T_TYPE,
+       HWMON_T_INPUT | HWMON_T_TYPE,
        0
 };
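
In a hwmon channel-config array each element describes one channel and each attribute within it is a bit, so "HWMON_T_INPUT, HWMON_T_TYPE" declared two channels with one attribute apiece instead of one channel carrying both. A tiny demonstration of the difference; the bit values here are invented for illustration, the real HWMON_T_* masks come from linux/hwmon.h:

#include <stdio.h>

#define T_INPUT (1u << 0)	/* invented values, illustration only */
#define T_TYPE  (1u << 1)

int main(void)
{
	unsigned int fixed[] = { T_INPUT | T_TYPE, 0 };	/* one channel, both attrs */
	unsigned int buggy[] = { T_INPUT, T_TYPE, 0 };	/* two channels, one attr each */

	printf("fixed: %zu channel(s)\n", sizeof(fixed) / sizeof(fixed[0]) - 1);
	printf("buggy: %zu channel(s)\n", sizeof(buggy) / sizeof(buggy[0]) - 1);
	return 0;
}
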
 
index b91a80abf724d087e02cc7aa97e211a1380f6570..4679acb4918e7f65660a8e0e32a7c5f793f03ff0 100644 (file)
@@ -890,6 +890,8 @@ static int occ_setup_sensor_attrs(struct occ *occ)
                                s++;
                        }
                }
+
+               s = (sensors->power.num_sensors * 4) + 1;
        } else {
                for (i = 0; i < sensors->power.num_sensors; ++i) {
                        s = i + 1;
@@ -918,11 +920,11 @@ static int occ_setup_sensor_attrs(struct occ *occ)
                                                     show_power, NULL, 3, i);
                        attr++;
                }
-       }
 
-       if (sensors->caps.num_sensors >= 1) {
                s = sensors->power.num_sensors + 1;
+       }
 
+       if (sensors->caps.num_sensors >= 1) {
                snprintf(attr->name, sizeof(attr->name), "power%d_label", s);
                attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL,
                                             0, 0);
index f2c6819712013046246002346af928bd1ab16bc0..f8979abb9a19ca963bf9625fc911ab74590b388a 100644 (file)
@@ -131,6 +131,7 @@ config I2C_I801
            Cannon Lake (PCH)
            Cedar Fork (PCH)
            Ice Lake (PCH)
+           Comet Lake (PCH)
 
          This driver can also be built as a module.  If so, the module
          will be called i2c-i801.
index c91e145ef5a56dbb1a512c23611f06ad7d22aa05..679c6c41f64b49babf8b0a7505d56a2c4093f6c7 100644 (file)
@@ -71,6 +71,7 @@
  * Cannon Lake-LP (PCH)                0x9da3  32      hard    yes     yes     yes
  * Cedar Fork (PCH)            0x18df  32      hard    yes     yes     yes
  * Ice Lake-LP (PCH)           0x34a3  32      hard    yes     yes     yes
+ * Comet Lake (PCH)            0x02a3  32      hard    yes     yes     yes
  *
  * Features supported by this driver:
  * Software PEC                                no
 #define PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS       0xa223
 #define PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS       0xa2a3
 #define PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS         0xa323
+#define PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS            0x02a3
 
 struct i801_mux_config {
        char *gpio_chip;
@@ -1038,6 +1040,7 @@ static const struct pci_device_id i801_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS) },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS) },
        { 0, }
 };
 
@@ -1534,6 +1537,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
        case PCI_DEVICE_ID_INTEL_DNV_SMBUS:
        case PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS:
        case PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS:
+       case PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS:
                priv->features |= FEATURE_I2C_BLOCK_READ;
                priv->features |= FEATURE_IRQ;
                priv->features |= FEATURE_SMBUS_PEC;
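
Enabling a new platform in i2c-i801 touches the usual three places: a device-ID define, an entry in the sentinel-terminated PCI match table, and a case in probe() that opts the device into the feature set. A self-contained analog of the match-table lookup; the vendor and device values are copied from the hunks above, everything else is illustrative:

#include <stdio.h>
#include <stdint.h>

struct dev_id { uint16_t vendor, device; };

#define VENDOR_INTEL 0x8086

static const struct dev_id ids[] = {
	{ VENDOR_INTEL, 0x34a3 },	/* Ice Lake-LP */
	{ VENDOR_INTEL, 0x02a3 },	/* Comet Lake, the new entry */
	{ 0, 0 }			/* sentinel terminator */
};

static int supported(uint16_t vendor, uint16_t device)
{
	for (const struct dev_id *p = ids; p->vendor; p++)
		if (p->vendor == vendor && p->device == device)
			return 1;
	return 0;
}

int main(void)
{
	printf("0x02a3 supported: %d\n", supported(VENDOR_INTEL, 0x02a3));
	return 0;
}
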
index 42fed40198a0fb77981e90236c465c9a12bff218..c0c3043b5d6119adb0b1345c17f4dd6025b37036 100644 (file)
@@ -1169,11 +1169,13 @@ static int i2c_imx_probe(struct platform_device *pdev)
        /* Init DMA config if supported */
        ret = i2c_imx_dma_request(i2c_imx, phy_addr);
        if (ret < 0)
-               goto clk_notifier_unregister;
+               goto del_adapter;
 
        dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n");
        return 0;   /* Return OK */
 
+del_adapter:
+       i2c_del_adapter(&i2c_imx->adapter);
 clk_notifier_unregister:
        clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb);
 rpm_disable:
index 2649e0f2ff65993e1401b0c2d29e7d3303687ffd..f5ecb660fe7d4a4bfe6222ab8ac141abb6584474 100644 (file)
@@ -351,7 +351,7 @@ static bool has_gateway(const struct dst_entry *dst, sa_family_t family)
 
        if (family == AF_INET) {
                rt = container_of(dst, struct rtable, dst);
-               return rt->rt_uses_gateway;
+               return rt->rt_gw_family == AF_INET;
        }
 
        rt6 = container_of(dst, struct rt6_info, dst);
index 612f04190ed8386e51ab5f8321464320140c1e77..9784c6c0d2ecfbbca031871f54fcc415602029fc 100644 (file)
@@ -13232,7 +13232,7 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
        int total_contexts;
        int ret;
        unsigned ngroups;
-       int qos_rmt_count;
+       int rmt_count;
        int user_rmt_reduced;
        u32 n_usr_ctxts;
        u32 send_contexts = chip_send_contexts(dd);
@@ -13294,10 +13294,20 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
                n_usr_ctxts = rcv_contexts - total_contexts;
        }
 
-       /* each user context requires an entry in the RMT */
-       qos_rmt_count = qos_rmt_entries(dd, NULL, NULL);
-       if (qos_rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
-               user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count;
+       /*
+        * The RMT entries are currently allocated as shown below:
+        * 1. QOS (0 to 128 entries);
+        * 2. FECN for PSM (num_user_contexts + num_vnic_contexts);
+        * 3. VNIC (num_vnic_contexts).
+        * It should be noted that PSM FECN oversubscribes num_vnic_contexts
+        * entries of RMT because both VNIC and PSM could allocate any receive
+        * context between dd->first_dyn_alloc_ctxt and dd->num_rcv_contexts,
+        * and PSM FECN must reserve an RMT entry for each possible PSM receive
+        * context.
+        */
+       rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_vnic_contexts * 2);
+       if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
+               user_rmt_reduced = NUM_MAP_ENTRIES - rmt_count;
                dd_dev_err(dd,
                           "RMT size is reducing the number of user receive contexts from %u to %d\n",
                           n_usr_ctxts,
@@ -14285,9 +14295,11 @@ static void init_user_fecn_handling(struct hfi1_devdata *dd,
        u64 reg;
        int i, idx, regoff, regidx;
        u8 offset;
+       u32 total_cnt;
 
        /* there needs to be enough room in the map table */
-       if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) {
+       total_cnt = dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt;
+       if (rmt->used + total_cnt >= NUM_MAP_ENTRIES) {
                dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
                return;
        }
@@ -14341,7 +14353,7 @@ static void init_user_fecn_handling(struct hfi1_devdata *dd,
        /* add rule 1 */
        add_rsm_rule(dd, RSM_INS_FECN, &rrd);
 
-       rmt->used += dd->num_user_contexts;
+       rmt->used += total_cnt;
 }
 
 /* Initialize RSM for VNIC */
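
The comment added above spells out the RMT budget: the QOS entries plus an FECN reservation for every receive context PSM might claim, which is why the VNIC count appears twice. A back-of-the-envelope check of the squeeze logic; NUM_MAP_ENTRIES and all inputs are made-up numbers, not the chip's real values:

#include <stdio.h>

#define NUM_MAP_ENTRIES 256	/* assumed table size, illustration only */

int main(void)
{
	int qos = 128, vnic = 8, usr = 120;	/* invented inputs */
	int rmt = qos + 2 * vnic;		/* QOS + FECN-for-PSM + VNIC */

	if (rmt + usr > NUM_MAP_ENTRIES)
		usr = NUM_MAP_ENTRIES - rmt;	/* user contexts get squeezed */
	printf("rmt_count=%d, user contexts=%d\n", rmt, usr);	/* 144, 112 */
	return 0;
}
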
index 9b643c2409cf8bee5c28084b104c2839bc5768aa..eba300330a027acdae1b97c92af5ef07ece6b605 100644 (file)
@@ -898,7 +898,9 @@ void notify_error_qp(struct rvt_qp *qp)
                if (!list_empty(&priv->s_iowait.list) &&
                    !(qp->s_flags & RVT_S_BUSY) &&
                    !(priv->s_flags & RVT_S_BUSY)) {
-                       qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
+                       qp->s_flags &= ~HFI1_S_ANY_WAIT_IO;
+                       iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_IB);
+                       iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_TID);
                        list_del_init(&priv->s_iowait.list);
                        priv->s_iowait.lock = NULL;
                        rvt_put_qp(qp);
index e6726c1ab8669a66722835b43d8b6b3481a11754..5991211d72bdd84d307ab4ebc245a011899eb4bc 100644 (file)
@@ -3088,7 +3088,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
                        update_ack_queue(qp, next);
                }
                e = &qp->s_ack_queue[qp->r_head_ack_queue];
-               if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
+               if (e->rdma_sge.mr) {
                        rvt_put_mr(e->rdma_sge.mr);
                        e->rdma_sge.mr = NULL;
                }
@@ -3166,7 +3166,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
                        update_ack_queue(qp, next);
                }
                e = &qp->s_ack_queue[qp->r_head_ack_queue];
-               if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
+               if (e->rdma_sge.mr) {
                        rvt_put_mr(e->rdma_sge.mr);
                        e->rdma_sge.mr = NULL;
                }
index fdda33aca77f2031ea2357435c029de0508ffdec..43cbce7a19ea43f42af2464a782221da2ee386bf 100644 (file)
@@ -5017,24 +5017,14 @@ int hfi1_make_tid_rdma_pkt(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
            make_tid_rdma_ack(qp, ohdr, ps))
                return 1;
 
-       if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
-               if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
-                       goto bail;
-               /* We are in the error state, flush the work request. */
-               if (qp->s_last == READ_ONCE(qp->s_head))
-                       goto bail;
-               /* If DMAs are in progress, we can't flush immediately. */
-               if (iowait_sdma_pending(&priv->s_iowait)) {
-                       qp->s_flags |= RVT_S_WAIT_DMA;
-                       goto bail;
-               }
-               clear_ahg(qp);
-               wqe = rvt_get_swqe_ptr(qp, qp->s_last);
-               hfi1_trdma_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
-                                        IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
-               /* will get called again */
-               goto done_free_tx;
-       }
+       /*
+        * Bail out if we can't send data.
+        * Note that this check must be done after the call to
+        * make_tid_rdma_ack() because the responding QP could be in
+        * RTR state where it can send TID RDMA ACK, not TID RDMA WRITE DATA.
+        */
+       if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK))
+               goto bail;
 
        if (priv->s_flags & RVT_S_WAIT_ACK)
                goto bail;
@@ -5144,11 +5134,6 @@ int hfi1_make_tid_rdma_pkt(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
        hfi1_make_ruc_header(qp, ohdr, (opcode << 24), bth1, bth2,
                             middle, ps);
        return 1;
-done_free_tx:
-       hfi1_put_txreq(ps->s_txreq);
-       ps->s_txreq = NULL;
-       return 1;
-
 bail:
        hfi1_put_txreq(ps->s_txreq);
 bail_no_tx:
index f1fec56f3ff49047d7ade725d13b43d3dba8bbb6..8e29dbb5b5fbc3bd883384915e76155201572848 100644 (file)
@@ -792,6 +792,8 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
                idx_offset = (obj & (table->num_obj - 1)) % obj_per_chunk;
                dma_offset = offset = idx_offset * table->obj_size;
        } else {
+               u32 seg_size = 64; /* 8 bytes per BA and 8 BA per segment */
+
                hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
                /* mtt mhop */
                i = mhop.l0_idx;
@@ -803,8 +805,8 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
                        hem_idx = i;
 
                hem = table->hem[hem_idx];
-               dma_offset = offset = (obj & (table->num_obj - 1)) *
-                                      table->obj_size % mhop.bt_chunk_size;
+               dma_offset = offset = (obj & (table->num_obj - 1)) * seg_size %
+                                      mhop.bt_chunk_size;
                if (mhop.hop_num == 2)
                        dma_offset = offset = 0;
        }
index b09f1cde2ff54ca9522a60d21ba3e82967938574..08be0e4eabcd764e9af0a666cec02fad1e921f76 100644 (file)
@@ -746,7 +746,6 @@ static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
        struct hns_roce_hem_table *table;
        dma_addr_t dma_handle;
        __le64 *mtts;
-       u32 s = start_index * sizeof(u64);
        u32 bt_page_size;
        u32 i;
 
@@ -780,7 +779,8 @@ static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
                return -EINVAL;
 
        mtts = hns_roce_table_find(hr_dev, table,
-                               mtt->first_seg + s / hr_dev->caps.mtt_entry_sz,
+                               mtt->first_seg +
+                               start_index / HNS_ROCE_MTT_ENTRY_PER_SEG,
                                &dma_handle);
        if (!mtts)
                return -ENOMEM;
index 57c76eafef2f8a896ff336af5bfa10954c99f1e4..66cdf625534ff8901a6efdf90295eaee3cc0145f 100644 (file)
@@ -274,9 +274,6 @@ void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
        wait_for_completion(&hr_qp->free);
 
        if ((hr_qp->ibqp.qp_type) != IB_QPT_GSI) {
-               if (hr_dev->caps.sccc_entry_sz)
-                       hns_roce_table_put(hr_dev, &qp_table->sccc_table,
-                                          hr_qp->qpn);
                if (hr_dev->caps.trrl_entry_sz)
                        hns_roce_table_put(hr_dev, &qp_table->trrl_table,
                                           hr_qp->qpn);
index 6bcc63aaa50bba036ee3612a929751f32f47f1fa..be95ac5aeb308625048f35a761900f40929761ff 100644 (file)
@@ -148,7 +148,7 @@ int mlx5_cmd_alloc_memic(struct mlx5_memic *memic, phys_addr_t *addr,
                        return ret;
                }
 
-               *addr = pci_resource_start(dev->pdev, 0) +
+               *addr = dev->bar_addr +
                        MLX5_GET64(alloc_memic_out, out, memic_start_addr);
 
                return 0;
@@ -167,7 +167,7 @@ int mlx5_cmd_dealloc_memic(struct mlx5_memic *memic, u64 addr, u64 length)
        u64 start_page_idx;
        int err;
 
-       addr -= pci_resource_start(dev->pdev, 0);
+       addr -= dev->bar_addr;
        start_page_idx = (addr - hw_start_addr) >> PAGE_SHIFT;
 
        MLX5_SET(dealloc_memic_in, in, opcode, MLX5_CMD_OP_DEALLOC_MEMIC);
index 531ff20b32ade6ccb4d0b3533bc1f8ceceed1b26..0845e95d2d11e8cc73442d3219c383fc695221ce 100644 (file)
@@ -2009,7 +2009,7 @@ static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev,
 
        fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? MLX5_UARS_IN_PAGE : 1;
 
-       return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) + uar_idx / fw_uars_per_page;
+       return (dev->mdev->bar_addr >> PAGE_SHIFT) + uar_idx / fw_uars_per_page;
 }
 
 static int get_command(unsigned long offset)
@@ -2199,7 +2199,7 @@ static int dm_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
            page_idx + npages)
                return -EINVAL;
 
-       pfn = ((pci_resource_start(dev->mdev->pdev, 0) +
+       pfn = ((dev->mdev->bar_addr +
              MLX5_CAP64_DEV_MEM(dev->mdev, memic_bar_start_addr)) >>
              PAGE_SHIFT) +
              page_idx;
@@ -2283,7 +2283,7 @@ struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
                goto err_free;
 
        start_offset = memic_addr & ~PAGE_MASK;
-       page_idx = (memic_addr - pci_resource_start(memic->dev->pdev, 0) -
+       page_idx = (memic_addr - memic->dev->bar_addr -
                    MLX5_CAP64_DEV_MEM(memic->dev, memic_bar_start_addr)) >>
                    PAGE_SHIFT;
 
@@ -2326,7 +2326,7 @@ int mlx5_ib_dealloc_dm(struct ib_dm *ibdm)
        if (ret)
                return ret;
 
-       page_idx = (dm->dev_addr - pci_resource_start(memic->dev->pdev, 0) -
+       page_idx = (dm->dev_addr - memic->dev->bar_addr -
                    MLX5_CAP64_DEV_MEM(memic->dev, memic_bar_start_addr)) >>
                    PAGE_SHIFT;
        bitmap_clear(to_mucontext(ibdm->uobject->context)->dm_pages,
index c85f002558843ff50402bf14ba14ede892beb911..ca921fd4049963b84ad4f0541505ade7714eabbc 100644 (file)
@@ -1194,8 +1194,7 @@ static struct ib_mr *mlx5_ib_get_memic_mr(struct ib_pd *pd, u64 memic_addr,
        MLX5_SET64(mkc, mkc, len, length);
        MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
        MLX5_SET(mkc, mkc, qpn, 0xffffff);
-       MLX5_SET64(mkc, mkc, start_addr,
-                  memic_addr - pci_resource_start(dev->mdev->pdev, 0));
+       MLX5_SET64(mkc, mkc, start_addr, memic_addr - dev->mdev->bar_addr);
 
        err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
        if (err)
index c20bfc41ecf18602cd0f289941d49dd6d37390da..0aa10ebda5d9af2f60f5d98807a2de6ec307ad04 100644 (file)
@@ -585,7 +585,7 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
        struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
        bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
        bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH;
-       u64 access_mask = ODP_READ_ALLOWED_BIT;
+       u64 access_mask;
        u64 start_idx, page_mask;
        struct ib_umem_odp *odp;
        size_t size;
@@ -607,6 +607,7 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
        page_shift = mr->umem->page_shift;
        page_mask = ~(BIT(page_shift) - 1);
        start_idx = (io_virt - (mr->mmkey.iova & page_mask)) >> page_shift;
+       access_mask = ODP_READ_ALLOWED_BIT;
 
        if (prefetch && !downgrade && !mr->umem->writable) {
                /* prefetch with write-access must
index 7cd006da1daef05cd335dc77cda8281e179630c4..ef7d69269a88de4fbb8ab42a853840c10c26263d 100644 (file)
@@ -5119,7 +5119,7 @@ static int _mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
                wmb();
 
                /* currently we support only regular doorbells */
-               mlx5_write64((__be32 *)ctrl, bf->bfreg->map + bf->offset, NULL);
+               mlx5_write64((__be32 *)ctrl, bf->bfreg->map + bf->offset);
                /* Make sure doorbells don't leak out of SQ spinlock
                 * and reach the HCA out of order.
                 */
index 032883180f6524ce69872fd72f2e2519ad9e5056..0010a3ed64f154b4220db3ee757cf1f44affb82c 100644 (file)
@@ -1407,7 +1407,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
                if (neigh->nud_state & NUD_VALID) {
                        nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X"
                                  " is %pM, Gateway is 0x%08X \n", dst_ip,
-                                 neigh->ha, ntohl(rt->rt_gateway));
+                                 neigh->ha, ntohl(rt->rt_gw4));
 
                        if (arpindex >= 0) {
                                if (ether_addr_equal(nesadapter->arp_table[arpindex].mac_addr, neigh->ha)) {
index 6d8b3e0de57a8e0d3e8071d9aebd0707618cebb4..ec41400fec0c01aa4d2894b7652701a90a190fff 100644 (file)
@@ -1131,6 +1131,8 @@ static void pvrdma_pci_remove(struct pci_dev *pdev)
        pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
        pvrdma_page_dir_cleanup(dev, &dev->async_pdir);
        pvrdma_free_slots(dev);
+       dma_free_coherent(&pdev->dev, sizeof(*dev->dsr), dev->dsr,
+                         dev->dsrbase);
 
        iounmap(dev->regs);
        kfree(dev->sgid_tbl);
index 21cb088d66877a4bd6c8c914e72eff5c385de031..f7cdd2ab7f11f6cba22003d4cf71576b4bc77b72 100644 (file)
@@ -3169,21 +3169,24 @@ static void amd_iommu_get_resv_regions(struct device *dev,
                return;
 
        list_for_each_entry(entry, &amd_iommu_unity_map, list) {
+               int type, prot = 0;
                size_t length;
-               int prot = 0;
 
                if (devid < entry->devid_start || devid > entry->devid_end)
                        continue;
 
+               type   = IOMMU_RESV_DIRECT;
                length = entry->address_end - entry->address_start;
                if (entry->prot & IOMMU_PROT_IR)
                        prot |= IOMMU_READ;
                if (entry->prot & IOMMU_PROT_IW)
                        prot |= IOMMU_WRITE;
+               if (entry->prot & IOMMU_UNITY_MAP_FLAG_EXCL_RANGE)
+                       /* Exclusion range */
+                       type = IOMMU_RESV_RESERVED;
 
                region = iommu_alloc_resv_region(entry->address_start,
-                                                length, prot,
-                                                IOMMU_RESV_DIRECT);
+                                                length, prot, type);
                if (!region) {
                        dev_err(dev, "Out of memory allocating dm-regions\n");
                        return;
index f773792d77fd533be53ec3796692c140f77121d7..ff40ba758cf365e89ddeb2270971e1536554b817 100644 (file)
@@ -359,7 +359,7 @@ static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
 static void iommu_set_exclusion_range(struct amd_iommu *iommu)
 {
        u64 start = iommu->exclusion_start & PAGE_MASK;
-       u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
+       u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK;
        u64 entry;
 
        if (!iommu->exclusion_start)
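
The exclusion-range fix is a classic inclusive-limit off-by-one: the limit should name the last address inside the range, so start + length lands one byte, and after page masking one whole page, too far. Worked numbers:

#include <stdio.h>
#include <stdint.h>

#define PAGE_MASK (~0xfffULL)

int main(void)
{
	uint64_t start = 0x100000, length = 0x3000;

	uint64_t old_limit = (start + length) & PAGE_MASK;	/* 0x103000: first page past the range */
	uint64_t new_limit = (start + length - 1) & PAGE_MASK;	/* 0x102000: last page inside it */

	printf("old=%#llx new=%#llx\n",
	       (unsigned long long)old_limit, (unsigned long long)new_limit);
	return 0;
}
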
@@ -2013,6 +2013,9 @@ static int __init init_unity_map_range(struct ivmd_header *m)
        if (e == NULL)
                return -ENOMEM;
 
+       if (m->flags & IVMD_FLAG_EXCL_RANGE)
+               init_exclusion_range(m);
+
        switch (m->type) {
        default:
                kfree(e);
@@ -2059,9 +2062,7 @@ static int __init init_memory_definitions(struct acpi_table_header *table)
 
        while (p < end) {
                m = (struct ivmd_header *)p;
-               if (m->flags & IVMD_FLAG_EXCL_RANGE)
-                       init_exclusion_range(m);
-               else if (m->flags & IVMD_FLAG_UNITY_MAP)
+               if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE))
                        init_unity_map_range(m);
 
                p += m->length;
index eae0741f72dce2fcea771e415a986b062515c7fc..87965e4d964771bd2352d6254bba299f43734107 100644 (file)
 #define IOMMU_PROT_IR 0x01
 #define IOMMU_PROT_IW 0x02
 
+#define IOMMU_UNITY_MAP_FLAG_EXCL_RANGE        (1 << 2)
+
 /* IOMMU capabilities */
 #define IOMMU_CAP_IOTLB   24
 #define IOMMU_CAP_NPCACHE 26
index f101afc315abb8da199fa1a9d2bd4df3d44d82e9..9a8a8870e26727e7398afffd5286860b0e8581d9 100644 (file)
 
 #define ARM_V7S_TCR_PD1                        BIT(5)
 
+#ifdef CONFIG_ZONE_DMA32
+#define ARM_V7S_TABLE_GFP_DMA GFP_DMA32
+#define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA32
+#else
+#define ARM_V7S_TABLE_GFP_DMA GFP_DMA
+#define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA
+#endif
+
 typedef u32 arm_v7s_iopte;
 
 static bool selftest_running;
@@ -197,13 +205,16 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
        void *table = NULL;
 
        if (lvl == 1)
-               table = (void *)__get_dma_pages(__GFP_ZERO, get_order(size));
+               table = (void *)__get_free_pages(
+                       __GFP_ZERO | ARM_V7S_TABLE_GFP_DMA, get_order(size));
        else if (lvl == 2)
-               table = kmem_cache_zalloc(data->l2_tables, gfp | GFP_DMA);
+               table = kmem_cache_zalloc(data->l2_tables, gfp);
        phys = virt_to_phys(table);
-       if (phys != (arm_v7s_iopte)phys)
+       if (phys != (arm_v7s_iopte)phys) {
                /* Doesn't fit in PTE */
+               dev_err(dev, "Page table does not fit in PTE: %pa", &phys);
                goto out_free;
+       }
        if (table && !(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) {
                dma = dma_map_single(dev, table, size, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, dma))
@@ -733,7 +744,7 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
        data->l2_tables = kmem_cache_create("io-pgtable_armv7s_l2",
                                            ARM_V7S_TABLE_SIZE(2),
                                            ARM_V7S_TABLE_SIZE(2),
-                                           SLAB_CACHE_DMA, NULL);
+                                           ARM_V7S_TABLE_SLAB_FLAGS, NULL);
        if (!data->l2_tables)
                goto out_free_data;
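
This io-pgtable change exists because a v7s PTE is only 32 bits wide: a level-2 table allocated above 4 GiB cannot be encoded, so allocations are steered to a DMA32 (or DMA) zone and the driver keeps a truncation test as a backstop. That test in isolation, with arm_v7s_iopte approximated by a plain uint32_t:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t iopte;	/* stands in for arm_v7s_iopte */

int main(void)
{
	uint64_t phys = 0x100000000ULL;	/* a table that landed above 4 GiB */

	if (phys != (iopte)phys)	/* the driver's "fits in a PTE" check */
		printf("%#llx does not fit in a 32-bit PTE\n",
		       (unsigned long long)phys);
	return 0;
}
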
 
index 33a982e33716369b7d2bf5885ef5a8ed22f04414..109de67d5d727c227d3970b2879edd60d6478357 100644 (file)
@@ -1105,10 +1105,12 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
 
                dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type);
                if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) {
-                       dev_warn(dev,
-                                "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
-                                iommu_def_domain_type);
                        dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA);
+                       if (dom) {
+                               dev_warn(dev,
+                                        "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
+                                        iommu_def_domain_type);
+                       }
                }
 
                group->default_domain = dom;
index 86b72fbd3b45dd63f495de36435410997740f473..353111a104133a0faeadf5d46e3e2f1bbf617d27 100644 (file)
@@ -130,6 +130,7 @@ static int __init ls1x_intc_of_init(struct device_node *node,
                                             NULL);
        if (!priv->domain) {
                pr_err("ls1x-irq: cannot add IRQ domain\n");
+               err = -ENOMEM;
                goto out_iounmap;
        }
 
index 4ab8b1b6608f7136365f91d713f65647a8271296..a14e35d405387d4dc43bf672139c773bf4b05d2f 100644 (file)
@@ -710,10 +710,10 @@ base_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
        struct sock *sk = sock->sk;
        int err = 0;
 
-       if (!maddr || maddr->family != AF_ISDN)
+       if (addr_len < sizeof(struct sockaddr_mISDN))
                return -EINVAL;
 
-       if (addr_len < sizeof(struct sockaddr_mISDN))
+       if (!maddr || maddr->family != AF_ISDN)
                return -EINVAL;
 
        lock_sock(sk);
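
Reordering the two checks in base_sock_bind() matters because maddr aliases the caller-supplied buffer: the family field may only be read once the length check has proven the buffer is large enough to contain it. A standalone sketch of the safe ordering, with the struct simplified (AF_ISDN is 34 on Linux):

#include <errno.h>
#include <stdio.h>
#include <stddef.h>

#define AF_ISDN 34

struct sockaddr_mini { unsigned short family; unsigned char data[6]; };

static int bind_check(const void *addr, size_t addr_len)
{
	const struct sockaddr_mini *a = addr;

	if (addr_len < sizeof(*a))	/* 1. length first: buffer may be short */
		return -EINVAL;
	if (!a || a->family != AF_ISDN)	/* 2. only then read its fields */
		return -EINVAL;
	return 0;
}

int main(void)
{
	struct sockaddr_mini sa = { .family = AF_ISDN };

	printf("full: %d, truncated: %d\n",
	       bind_check(&sa, sizeof(sa)), bind_check(&sa, 1));
	return 0;
}
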
index 7fea18b0c15d115178c874163232fa6c2e3706ce..7cb4d685a1f107f335c7cf475d06aae6688ab6bc 100644 (file)
@@ -513,6 +513,7 @@ static int pca9532_probe(struct i2c_client *client,
        const struct i2c_device_id *id)
 {
        int devid;
+       const struct of_device_id *of_id;
        struct pca9532_data *data = i2c_get_clientdata(client);
        struct pca9532_platform_data *pca9532_pdata =
                        dev_get_platdata(&client->dev);
@@ -528,8 +529,11 @@ static int pca9532_probe(struct i2c_client *client,
                        dev_err(&client->dev, "no platform data\n");
                        return -EINVAL;
                }
-               devid = (int)(uintptr_t)of_match_device(
-                       of_pca9532_leds_match, &client->dev)->data;
+               of_id = of_match_device(of_pca9532_leds_match,
+                               &client->dev);
+               if (unlikely(!of_id))
+                       return -EINVAL;
+               devid = (int)(uintptr_t) of_id->data;
        } else {
                devid = id->driver_data;
        }
index 3dd3ed46d473b673fd916085044bd3c3b38b0759..136f86a1627d18cf396990ca1a4122d17578d0af 100644 (file)
@@ -122,7 +122,8 @@ static ssize_t device_name_store(struct device *dev,
                trigger_data->net_dev = NULL;
        }
 
-       strncpy(trigger_data->device_name, buf, size);
+       memcpy(trigger_data->device_name, buf, size);
+       trigger_data->device_name[size] = 0;
        if (size > 0 && trigger_data->device_name[size - 1] == '\n')
                trigger_data->device_name[size - 1] = 0;
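
The strncpy()-to-memcpy() swap above is about termination: strncpy() leaves the destination unterminated whenever the source fills it, whereas copying size bytes and storing the NUL explicitly is unambiguous (the trigger's buffer keeps one spare byte for exactly this). The same idea standalone, assuming the destination has room for the terminator:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char name[8];
	const char *src = "eth0eth1";	/* 8 bytes, no NUL inside sizeof(name) */
	size_t size = sizeof(name) - 1;

	/* strncpy(name, src, sizeof(name)) would leave name unterminated;
	 * an explicit copy-then-terminate cannot. */
	memcpy(name, src, size);
	name[size] = 0;

	printf("%s\n", name);
	return 0;
}
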
 
@@ -301,11 +302,11 @@ static int netdev_trig_notify(struct notifier_block *nb,
                container_of(nb, struct led_netdev_data, notifier);
 
        if (evt != NETDEV_UP && evt != NETDEV_DOWN && evt != NETDEV_CHANGE
-           && evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER
-           && evt != NETDEV_CHANGENAME)
+           && evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER)
                return NOTIFY_DONE;
 
-       if (strcmp(dev->name, trigger_data->device_name))
+       if (!(dev == trigger_data->net_dev ||
+             (evt == NETDEV_REGISTER && !strcmp(dev->name, trigger_data->device_name))))
                return NOTIFY_DONE;
 
        cancel_delayed_work_sync(&trigger_data->work);
@@ -320,12 +321,9 @@ static int netdev_trig_notify(struct notifier_block *nb,
                dev_hold(dev);
                trigger_data->net_dev = dev;
                break;
-       case NETDEV_CHANGENAME:
        case NETDEV_UNREGISTER:
-               if (trigger_data->net_dev) {
-                       dev_put(trigger_data->net_dev);
-                       trigger_data->net_dev = NULL;
-               }
+               dev_put(trigger_data->net_dev);
+               trigger_data->net_dev = NULL;
                break;
        case NETDEV_UP:
        case NETDEV_CHANGE:
index 3789185144dae34241911d3c42308e6cbad650f2..0b7d5fb4548dcd8720f86e98a2567e3d387a061d 100644 (file)
@@ -231,14 +231,14 @@ static void pblk_end_partial_read(struct nvm_rq *rqd)
        struct pblk_sec_meta *meta;
        struct bio *new_bio = rqd->bio;
        struct bio *bio = pr_ctx->orig_bio;
-       struct bio_vec src_bv, dst_bv;
        void *meta_list = rqd->meta_list;
-       int bio_init_idx = pr_ctx->bio_init_idx;
        unsigned long *read_bitmap = pr_ctx->bitmap;
+       struct bvec_iter orig_iter = BVEC_ITER_ALL_INIT;
+       struct bvec_iter new_iter = BVEC_ITER_ALL_INIT;
        int nr_secs = pr_ctx->orig_nr_secs;
        int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
        void *src_p, *dst_p;
-       int hole, i;
+       int bit, i;
 
        if (unlikely(nr_holes == 1)) {
                struct ppa_addr ppa;
@@ -257,33 +257,39 @@ static void pblk_end_partial_read(struct nvm_rq *rqd)
 
        /* Fill the holes in the original bio */
        i = 0;
-       hole = find_first_zero_bit(read_bitmap, nr_secs);
-       do {
-               struct pblk_line *line;
+       for (bit = 0; bit < nr_secs; bit++) {
+               if (!test_bit(bit, read_bitmap)) {
+                       struct bio_vec dst_bv, src_bv;
+                       struct pblk_line *line;
 
-               line = pblk_ppa_to_line(pblk, rqd->ppa_list[i]);
-               kref_put(&line->ref, pblk_line_put);
+                       line = pblk_ppa_to_line(pblk, rqd->ppa_list[i]);
+                       kref_put(&line->ref, pblk_line_put);
 
-               meta = pblk_get_meta(pblk, meta_list, hole);
-               meta->lba = cpu_to_le64(pr_ctx->lba_list_media[i]);
+                       meta = pblk_get_meta(pblk, meta_list, bit);
+                       meta->lba = cpu_to_le64(pr_ctx->lba_list_media[i]);
 
-               src_bv = new_bio->bi_io_vec[i++];
-               dst_bv = bio->bi_io_vec[bio_init_idx + hole];
+                       dst_bv = bio_iter_iovec(bio, orig_iter);
+                       src_bv = bio_iter_iovec(new_bio, new_iter);
 
-               src_p = kmap_atomic(src_bv.bv_page);
-               dst_p = kmap_atomic(dst_bv.bv_page);
+                       src_p = kmap_atomic(src_bv.bv_page);
+                       dst_p = kmap_atomic(dst_bv.bv_page);
 
-               memcpy(dst_p + dst_bv.bv_offset,
-                       src_p + src_bv.bv_offset,
-                       PBLK_EXPOSED_PAGE_SIZE);
+                       memcpy(dst_p + dst_bv.bv_offset,
+                               src_p + src_bv.bv_offset,
+                               PBLK_EXPOSED_PAGE_SIZE);
 
-               kunmap_atomic(src_p);
-               kunmap_atomic(dst_p);
+                       kunmap_atomic(src_p);
+                       kunmap_atomic(dst_p);
 
-               mempool_free(src_bv.bv_page, &pblk->page_bio_pool);
+                       flush_dcache_page(dst_bv.bv_page);
+                       mempool_free(src_bv.bv_page, &pblk->page_bio_pool);
 
-               hole = find_next_zero_bit(read_bitmap, nr_secs, hole + 1);
-       } while (hole < nr_secs);
+                       bio_advance_iter(new_bio, &new_iter,
+                                       PBLK_EXPOSED_PAGE_SIZE);
+                       i++;
+               }
+               bio_advance_iter(bio, &orig_iter, PBLK_EXPOSED_PAGE_SIZE);
+       }
 
        bio_put(new_bio);
        kfree(pr_ctx);
index 95c6d86ab5e8deaa0d5975240f708fe165061e6e..c4ef1fceead6ee1ba83bfb0e54f2eefae26bd48b 100644 (file)
@@ -115,6 +115,7 @@ struct mapped_device {
        struct srcu_struct io_barrier;
 };
 
+void disable_discard(struct mapped_device *md);
 void disable_write_same(struct mapped_device *md);
 void disable_write_zeroes(struct mapped_device *md);
 
index b53f30f16b4d4f2c02bf9c15e5b801234b8cd9ae..4b76f84424c3c1a73ef3bc3b9605a1486e3bf88b 100644 (file)
@@ -36,7 +36,7 @@ struct dm_device {
        struct list_head list;
 };
 
-const char *dm_allowed_targets[] __initconst = {
+const char * const dm_allowed_targets[] __initconst = {
        "crypt",
        "delay",
        "linear",
index d57d997a52c81cfe6c68918520316f993aeebc44..7c678f50aaa37a5612ea23bed69e0fb31526224e 100644 (file)
@@ -913,7 +913,7 @@ static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsig
 static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2)
 {
        return range1->logical_sector < range2->logical_sector + range2->n_sectors &&
-              range2->logical_sector + range2->n_sectors > range2->logical_sector;
+              range1->logical_sector + range1->n_sectors > range2->logical_sector;
 }
 
 static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting)
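
The ranges_overlap() fix restores the standard interval test: two half-open ranges overlap iff each one starts before the other ends. The buggy version compared range2 against itself, a condition that holds for any non-empty range, so disjoint ranges could be reported as overlapping. The predicate in isolation:

#include <stdio.h>
#include <stdbool.h>

struct range { unsigned long start, len; };

/* [a.start, a.start+a.len) and [b.start, b.start+b.len) overlap
 * iff each begins before the other ends */
static bool overlaps(struct range a, struct range b)
{
	return a.start < b.start + b.len && b.start < a.start + a.len;
}

int main(void)
{
	struct range r1 = { 0, 8 }, r2 = { 100, 8 };

	printf("overlap: %d\n", overlaps(r1, r2));	/* prints 0 */
	return 0;
}
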
@@ -959,8 +959,6 @@ static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity
                struct dm_integrity_range *last_range =
                        list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry);
                struct task_struct *last_range_task;
-               if (!ranges_overlap(range, last_range))
-                       break;
                last_range_task = last_range->task;
                list_del(&last_range->wait_entry);
                if (!add_new_range(ic, last_range, false)) {
@@ -3185,7 +3183,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
                        journal_watermark = val;
                else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1)
                        sync_msec = val;
-               else if (!memcmp(opt_string, "meta_device:", strlen("meta_device:"))) {
+               else if (!strncmp(opt_string, "meta_device:", strlen("meta_device:"))) {
                        if (ic->meta_dev) {
                                dm_put_device(ti, ic->meta_dev);
                                ic->meta_dev = NULL;
@@ -3204,17 +3202,17 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
                                goto bad;
                        }
                        ic->sectors_per_block = val >> SECTOR_SHIFT;
-               } else if (!memcmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
+               } else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
                        r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
                                            "Invalid internal_hash argument");
                        if (r)
                                goto bad;
-               } else if (!memcmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
+               } else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
                        r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
                                            "Invalid journal_crypt argument");
                        if (r)
                                goto bad;
-               } else if (!memcmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
+               } else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
                        r = get_alg_and_key(opt_string, &ic->journal_mac_alg,  &ti->error,
                                            "Invalid journal_mac argument");
                        if (r)
@@ -3616,7 +3614,7 @@ static struct target_type integrity_target = {
        .io_hints               = dm_integrity_io_hints,
 };
 
-int __init dm_integrity_init(void)
+static int __init dm_integrity_init(void)
 {
        int r;
 
@@ -3635,7 +3633,7 @@ int __init dm_integrity_init(void)
        return r;
 }
 
-void dm_integrity_exit(void)
+static void __exit dm_integrity_exit(void)
 {
        dm_unregister_target(&integrity_target);
        kmem_cache_destroy(journal_io_cache);
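
The memcmp()-to-strncmp() conversions in dm_integrity_ctr() harden the option-prefix matching: when the user string is shorter than the prefix, memcmp() still compares strlen(prefix) bytes and can read past the terminator, while strncmp() stops at the first NUL. A quick demonstration:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *opt = "meta";		/* shorter than the prefix */
	const char *prefix = "meta_device:";

	/* strncmp stops at opt's terminating NUL, so this is well defined
	 * even though opt is shorter than strlen(prefix) bytes */
	if (!strncmp(opt, prefix, strlen(prefix)))
		printf("matched\n");
	else
		printf("no match\n");
	return 0;
}
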
index 09773636602d3d86728b127ddb67b2d674b67cae..b66745bd08bbcc2dd1ab349f47c7326199518778 100644 (file)
@@ -222,11 +222,14 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped)
        }
 
        if (unlikely(error == BLK_STS_TARGET)) {
-               if (req_op(clone) == REQ_OP_WRITE_SAME &&
-                   !clone->q->limits.max_write_same_sectors)
+               if (req_op(clone) == REQ_OP_DISCARD &&
+                   !clone->q->limits.max_discard_sectors)
+                       disable_discard(tio->md);
+               else if (req_op(clone) == REQ_OP_WRITE_SAME &&
+                        !clone->q->limits.max_write_same_sectors)
                        disable_write_same(tio->md);
-               if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
-                   !clone->q->limits.max_write_zeroes_sectors)
+               else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
+                        !clone->q->limits.max_write_zeroes_sectors)
                        disable_write_zeroes(tio->md);
        }
 
index ba9481f1bf3c04cf64c7ea5e570f2a2bf533759c..cde3b49b2a9107abafd76d190c9fc61209141f7b 100644 (file)
@@ -1844,6 +1844,36 @@ static bool dm_table_supports_secure_erase(struct dm_table *t)
        return true;
 }
 
+static int device_requires_stable_pages(struct dm_target *ti,
+                                       struct dm_dev *dev, sector_t start,
+                                       sector_t len, void *data)
+{
+       struct request_queue *q = bdev_get_queue(dev->bdev);
+
+       return q && bdi_cap_stable_pages_required(q->backing_dev_info);
+}
+
+/*
+ * If any underlying device requires stable pages, a table must require
+ * them as well.  Only targets that support iterate_devices are considered:
+ * don't want error, zero, etc to require stable pages.
+ */
+static bool dm_table_requires_stable_pages(struct dm_table *t)
+{
+       struct dm_target *ti;
+       unsigned i;
+
+       for (i = 0; i < dm_table_get_num_targets(t); i++) {
+               ti = dm_table_get_target(t, i);
+
+               if (ti->type->iterate_devices &&
+                   ti->type->iterate_devices(ti, device_requires_stable_pages, NULL))
+                       return true;
+       }
+
+       return false;
+}
+
 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
                               struct queue_limits *limits)
 {
@@ -1896,6 +1926,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 
        dm_table_verify_integrity(t);
 
+       /*
+        * Some devices don't use blk_integrity but still want stable pages
+        * because they do their own checksumming.
+        */
+       if (dm_table_requires_stable_pages(t))
+               q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
+       else
+               q->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES;
+
        /*
         * Determine whether or not this queue's I/O timings contribute
         * to the entropy pool. Only request-based targets use this.
index 68d24056d0b1c17d7fa0c271d1d5582d7eb72c89..043f0761e4a0aea8a22a1c6745f3f9bbbc021dfd 100644 (file)
@@ -945,6 +945,15 @@ static void dec_pending(struct dm_io *io, blk_status_t error)
        }
 }
 
+void disable_discard(struct mapped_device *md)
+{
+       struct queue_limits *limits = dm_get_queue_limits(md);
+
+       /* device doesn't really support DISCARD, disable it */
+       limits->max_discard_sectors = 0;
+       blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
+}
+
 void disable_write_same(struct mapped_device *md)
 {
        struct queue_limits *limits = dm_get_queue_limits(md);
@@ -970,11 +979,14 @@ static void clone_endio(struct bio *bio)
        dm_endio_fn endio = tio->ti->type->end_io;
 
        if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
-               if (bio_op(bio) == REQ_OP_WRITE_SAME &&
-                   !bio->bi_disk->queue->limits.max_write_same_sectors)
+               if (bio_op(bio) == REQ_OP_DISCARD &&
+                   !bio->bi_disk->queue->limits.max_discard_sectors)
+                       disable_discard(md);
+               else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
+                        !bio->bi_disk->queue->limits.max_write_same_sectors)
                        disable_write_same(md);
-               if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
-                   !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
+               else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
+                        !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
                        disable_write_zeroes(md);
        }
 
@@ -1042,15 +1054,7 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
                return -EINVAL;
        }
 
-       /*
-        * BIO based queue uses its own splitting. When multipage bvecs
-        * is switched on, size of the incoming bio may be too big to
-        * be handled in some targets, such as crypt.
-        *
-        * When these targets are ready for the big bio, we can remove
-        * the limit.
-        */
-       ti->max_io_len = min_t(uint32_t, len, BIO_MAX_PAGES * PAGE_SIZE);
+       ti->max_io_len = (uint32_t) len;
 
        return 0;
 }
index 0ce2d8dfc5f1a19bedbae37f91ce956b1b13da89..26ad6468d13a786552f581e3f45eb444febf0b51 100644 (file)
@@ -1246,7 +1246,7 @@ config MFD_STA2X11
 
 config MFD_SUN6I_PRCM
        bool "Allwinner A31 PRCM controller"
-       depends on ARCH_SUNXI
+       depends on ARCH_SUNXI || COMPILE_TEST
        select MFD_CORE
        help
          Support for the PRCM (Power/Reset/Clock Management) unit available
index 69df27769c2136e817d3baf575269c59902752ac..43ac71691fe477f95eba6293cc6f6b2df810e243 100644 (file)
@@ -53,67 +53,67 @@ static const struct sprd_pmic_data sc2731_data = {
 static const struct mfd_cell sprd_pmic_devs[] = {
        {
                .name = "sc27xx-wdt",
-               .of_compatible = "sprd,sc27xx-wdt",
+               .of_compatible = "sprd,sc2731-wdt",
        }, {
                .name = "sc27xx-rtc",
-               .of_compatible = "sprd,sc27xx-rtc",
+               .of_compatible = "sprd,sc2731-rtc",
        }, {
                .name = "sc27xx-charger",
-               .of_compatible = "sprd,sc27xx-charger",
+               .of_compatible = "sprd,sc2731-charger",
        }, {
                .name = "sc27xx-chg-timer",
-               .of_compatible = "sprd,sc27xx-chg-timer",
+               .of_compatible = "sprd,sc2731-chg-timer",
        }, {
                .name = "sc27xx-fast-chg",
-               .of_compatible = "sprd,sc27xx-fast-chg",
+               .of_compatible = "sprd,sc2731-fast-chg",
        }, {
                .name = "sc27xx-chg-wdt",
-               .of_compatible = "sprd,sc27xx-chg-wdt",
+               .of_compatible = "sprd,sc2731-chg-wdt",
        }, {
                .name = "sc27xx-typec",
-               .of_compatible = "sprd,sc27xx-typec",
+               .of_compatible = "sprd,sc2731-typec",
        }, {
                .name = "sc27xx-flash",
-               .of_compatible = "sprd,sc27xx-flash",
+               .of_compatible = "sprd,sc2731-flash",
        }, {
                .name = "sc27xx-eic",
-               .of_compatible = "sprd,sc27xx-eic",
+               .of_compatible = "sprd,sc2731-eic",
        }, {
                .name = "sc27xx-efuse",
-               .of_compatible = "sprd,sc27xx-efuse",
+               .of_compatible = "sprd,sc2731-efuse",
        }, {
                .name = "sc27xx-thermal",
-               .of_compatible = "sprd,sc27xx-thermal",
+               .of_compatible = "sprd,sc2731-thermal",
        }, {
                .name = "sc27xx-adc",
-               .of_compatible = "sprd,sc27xx-adc",
+               .of_compatible = "sprd,sc2731-adc",
        }, {
                .name = "sc27xx-audio-codec",
-               .of_compatible = "sprd,sc27xx-audio-codec",
+               .of_compatible = "sprd,sc2731-audio-codec",
        }, {
                .name = "sc27xx-regulator",
-               .of_compatible = "sprd,sc27xx-regulator",
+               .of_compatible = "sprd,sc2731-regulator",
        }, {
                .name = "sc27xx-vibrator",
-               .of_compatible = "sprd,sc27xx-vibrator",
+               .of_compatible = "sprd,sc2731-vibrator",
        }, {
                .name = "sc27xx-keypad-led",
-               .of_compatible = "sprd,sc27xx-keypad-led",
+               .of_compatible = "sprd,sc2731-keypad-led",
        }, {
                .name = "sc27xx-bltc",
-               .of_compatible = "sprd,sc27xx-bltc",
+               .of_compatible = "sprd,sc2731-bltc",
        }, {
                .name = "sc27xx-fgu",
-               .of_compatible = "sprd,sc27xx-fgu",
+               .of_compatible = "sprd,sc2731-fgu",
        }, {
                .name = "sc27xx-7sreset",
-               .of_compatible = "sprd,sc27xx-7sreset",
+               .of_compatible = "sprd,sc2731-7sreset",
        }, {
                .name = "sc27xx-poweroff",
-               .of_compatible = "sprd,sc27xx-poweroff",
+               .of_compatible = "sprd,sc2731-poweroff",
        }, {
                .name = "sc27xx-syscon",
-               .of_compatible = "sprd,sc27xx-syscon",
+               .of_compatible = "sprd,sc2731-syscon",
        },
 };
 
index 299016bc46d909b4164708044ae21c9d5d5d14f4..104477b512a296b56e7549f001b63cd1ad9a43ac 100644 (file)
@@ -1245,6 +1245,28 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
        return status;
 }
 
+static int __maybe_unused twl_suspend(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+
+       if (client->irq)
+               disable_irq(client->irq);
+
+       return 0;
+}
+
+static int __maybe_unused twl_resume(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+
+       if (client->irq)
+               enable_irq(client->irq);
+
+       return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(twl_dev_pm_ops, twl_suspend, twl_resume);
+
 static const struct i2c_device_id twl_ids[] = {
        { "twl4030", TWL4030_VAUX2 },   /* "Triton 2" */
        { "twl5030", 0 },               /* T2 updated */
@@ -1262,6 +1284,7 @@ static const struct i2c_device_id twl_ids[] = {
 /* One Client Driver , 4 Clients */
 static struct i2c_driver twl_driver = {
        .driver.name    = DRIVER_NAME,
+       .driver.pm      = &twl_dev_pm_ops,
        .id_table       = twl_ids,
        .probe          = twl_probe,
        .remove         = twl_remove,
index 3525236ed8d9d702e25fac066926ba1933fe4edc..19c84214a7ea8890543ea8341033ed1ceb89df12 100644 (file)
@@ -179,6 +179,12 @@ static void cs_do_release(struct kref *ref)
 
        /* We also need to update CI for internal queues */
        if (cs->submitted) {
+               int cs_cnt = atomic_dec_return(&hdev->cs_active_cnt);
+
+               WARN_ONCE((cs_cnt < 0),
+                       "hl%d: error in CS active cnt %d\n",
+                       hdev->id, cs_cnt);
+
                hl_int_hw_queue_update_ci(cs);
 
                spin_lock(&hdev->hw_queues_mirror_lock);
index a53c12aff6ad9cebd9be4a2b031b9a93ea6c72b3..974a87789bd8689d1530daa8890bac3b3b32d38c 100644 (file)
@@ -232,6 +232,7 @@ static int vm_show(struct seq_file *s, void *data)
        struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
        enum vm_type_t *vm_type;
        bool once = true;
+       u64 j;
        int i;
 
        if (!dev_entry->hdev->mmu_enable)
@@ -260,7 +261,7 @@ static int vm_show(struct seq_file *s, void *data)
                        } else {
                                phys_pg_pack = hnode->ptr;
                                seq_printf(s,
-                                       "    0x%-14llx      %-10u       %-4u\n",
+                                       "    0x%-14llx      %-10llu       %-4u\n",
                                        hnode->vaddr, phys_pg_pack->total_size,
                                        phys_pg_pack->handle);
                        }
@@ -282,9 +283,9 @@ static int vm_show(struct seq_file *s, void *data)
                                                phys_pg_pack->page_size);
                        seq_puts(s, "   physical address\n");
                        seq_puts(s, "---------------------\n");
-                       for (i = 0 ; i < phys_pg_pack->npages ; i++) {
+                       for (j = 0 ; j < phys_pg_pack->npages ; j++) {
                                seq_printf(s, "    0x%-14llx\n",
-                                               phys_pg_pack->pages[i]);
+                                               phys_pg_pack->pages[j]);
                        }
                }
                spin_unlock(&vm->idr_lock);
index de46aa6ed1542438c5d5952ff77c9cc17dadc5a6..77d51be66c7e84045558fff78eea0a8e9a70439e 100644 (file)
@@ -11,6 +11,8 @@
 #include <linux/sched/signal.h>
 #include <linux/hwmon.h>
 
+#define HL_PLDM_PENDING_RESET_PER_SEC  (HL_PENDING_RESET_PER_SEC * 10)
+
 bool hl_device_disabled_or_in_reset(struct hl_device *hdev)
 {
        if ((hdev->disabled) || (atomic_read(&hdev->in_reset)))
@@ -216,6 +218,7 @@ static int device_early_init(struct hl_device *hdev)
        spin_lock_init(&hdev->hw_queues_mirror_lock);
        atomic_set(&hdev->in_reset, 0);
        atomic_set(&hdev->fd_open_cnt, 0);
+       atomic_set(&hdev->cs_active_cnt, 0);
 
        return 0;
 
@@ -413,6 +416,27 @@ int hl_device_suspend(struct hl_device *hdev)
 
        pci_save_state(hdev->pdev);
 
+       /* Block future CS/VM/JOB completion operations */
+       rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
+       if (rc) {
+               dev_err(hdev->dev, "Can't suspend while in reset\n");
+               return -EIO;
+       }
+
+       /* This blocks all other stuff that is not blocked by in_reset */
+       hdev->disabled = true;
+
+       /*
+        * Flush anyone that is inside the critical section of enqueueing
+        * jobs to the H/W.
+        */
+       hdev->asic_funcs->hw_queues_lock(hdev);
+       hdev->asic_funcs->hw_queues_unlock(hdev);
+
+       /* Flush processes that are sending message to CPU */
+       mutex_lock(&hdev->send_cpu_message_lock);
+       mutex_unlock(&hdev->send_cpu_message_lock);
+
        rc = hdev->asic_funcs->suspend(hdev);
        if (rc)
                dev_err(hdev->dev,
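
The suspend path uses an empty lock/unlock pair as a flush barrier: once disabled (and in_reset) is set, taking and immediately releasing the lock guarantees that any thread already inside the critical section has finished, without holding anything afterwards. A pthread analog of the barrier, assuming, as the driver does, that entrants check the flag before starting work:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
static volatile bool disabled;

static void quiesce(void)
{
	disabled = true;		/* new entrants see this and bail out */

	pthread_mutex_lock(&q_lock);	/* waits for the current holder to leave */
	pthread_mutex_unlock(&q_lock);	/* nothing stays held after the flush */
}

int main(void)
{
	quiesce();
	return 0;
}
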
@@ -440,21 +464,38 @@ int hl_device_resume(struct hl_device *hdev)
 
        pci_set_power_state(hdev->pdev, PCI_D0);
        pci_restore_state(hdev->pdev);
-       rc = pci_enable_device(hdev->pdev);
+       rc = pci_enable_device_mem(hdev->pdev);
        if (rc) {
                dev_err(hdev->dev,
                        "Failed to enable PCI device in resume\n");
                return rc;
        }
 
+       pci_set_master(hdev->pdev);
+
        rc = hdev->asic_funcs->resume(hdev);
        if (rc) {
-               dev_err(hdev->dev,
-                       "Failed to enable PCI access from device CPU\n");
-               return rc;
+               dev_err(hdev->dev, "Failed to resume device after suspend\n");
+               goto disable_device;
+       }
+
+
+       hdev->disabled = false;
+       atomic_set(&hdev->in_reset, 0);
+
+       rc = hl_device_reset(hdev, true, false);
+       if (rc) {
+               dev_err(hdev->dev, "Failed to reset device during resume\n");
+               goto disable_device;
        }
 
        return 0;
+
+disable_device:
+       pci_clear_master(hdev->pdev);
+       pci_disable_device(hdev->pdev);
+
+       return rc;
 }
 
 static void hl_device_hard_reset_pending(struct work_struct *work)
@@ -462,9 +503,16 @@ static void hl_device_hard_reset_pending(struct work_struct *work)
        struct hl_device_reset_work *device_reset_work =
                container_of(work, struct hl_device_reset_work, reset_work);
        struct hl_device *hdev = device_reset_work->hdev;
-       u16 pending_cnt = HL_PENDING_RESET_PER_SEC;
+       u16 pending_total, pending_cnt;
        struct task_struct *task = NULL;
 
+       if (hdev->pldm)
+               pending_total = HL_PLDM_PENDING_RESET_PER_SEC;
+       else
+               pending_total = HL_PENDING_RESET_PER_SEC;
+
+       pending_cnt = pending_total;
+
        /* Flush all processes that are inside hl_open */
        mutex_lock(&hdev->fd_open_cnt_lock);
 
@@ -489,6 +537,19 @@ static void hl_device_hard_reset_pending(struct work_struct *work)
                }
        }
 
+       pending_cnt = pending_total;
+
+       while (atomic_read(&hdev->fd_open_cnt) && pending_cnt) {
+               pending_cnt--;
+               ssleep(1);
+       }
+
+       if (atomic_read(&hdev->fd_open_cnt))
+               dev_crit(hdev->dev,
+                       "Going to hard reset with open user contexts\n");
+
        mutex_unlock(&hdev->fd_open_cnt_lock);
 
        hl_device_reset(hdev, true, true);
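The suspend path above leans on a common kernel idiom: hdev->disabled stops new entrants, and an empty lock/unlock pair on each relevant lock then waits out any thread already inside the corresponding critical section. A minimal sketch of the pattern, with hypothetical names (my_dev, my_do_io):

struct my_dev {
        bool disabled;
        struct mutex io_lock;
};

static void my_dev_io(struct my_dev *dev)
{
        mutex_lock(&dev->io_lock);
        if (!dev->disabled)
                my_do_io(dev);                  /* the critical section */
        mutex_unlock(&dev->io_lock);
}

static void my_dev_quiesce(struct my_dev *dev)
{
        dev->disabled = true;                   /* stop new entrants */
        mutex_lock(&dev->io_lock);              /* drain anyone already inside */
        mutex_unlock(&dev->io_lock);
}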
index 238dd57c541bdf1e632f8ff008f69bafc3e5e59a..ea979ebd62fb8c5f30d08b052a0e481325470ece 100644 (file)
@@ -1201,15 +1201,6 @@ static int goya_stop_external_queues(struct hl_device *hdev)
        return retval;
 }
 
-static void goya_resume_external_queues(struct hl_device *hdev)
-{
-       WREG32(mmDMA_QM_0_GLBL_CFG1, 0);
-       WREG32(mmDMA_QM_1_GLBL_CFG1, 0);
-       WREG32(mmDMA_QM_2_GLBL_CFG1, 0);
-       WREG32(mmDMA_QM_3_GLBL_CFG1, 0);
-       WREG32(mmDMA_QM_4_GLBL_CFG1, 0);
-}
-
 /*
  * goya_init_cpu_queues - Initialize PQ/CQ/EQ of CPU
  *
@@ -2178,36 +2169,6 @@ static int goya_stop_internal_queues(struct hl_device *hdev)
        return retval;
 }
 
-static void goya_resume_internal_queues(struct hl_device *hdev)
-{
-       WREG32(mmMME_QM_GLBL_CFG1, 0);
-       WREG32(mmMME_CMDQ_GLBL_CFG1, 0);
-
-       WREG32(mmTPC0_QM_GLBL_CFG1, 0);
-       WREG32(mmTPC0_CMDQ_GLBL_CFG1, 0);
-
-       WREG32(mmTPC1_QM_GLBL_CFG1, 0);
-       WREG32(mmTPC1_CMDQ_GLBL_CFG1, 0);
-
-       WREG32(mmTPC2_QM_GLBL_CFG1, 0);
-       WREG32(mmTPC2_CMDQ_GLBL_CFG1, 0);
-
-       WREG32(mmTPC3_QM_GLBL_CFG1, 0);
-       WREG32(mmTPC3_CMDQ_GLBL_CFG1, 0);
-
-       WREG32(mmTPC4_QM_GLBL_CFG1, 0);
-       WREG32(mmTPC4_CMDQ_GLBL_CFG1, 0);
-
-       WREG32(mmTPC5_QM_GLBL_CFG1, 0);
-       WREG32(mmTPC5_CMDQ_GLBL_CFG1, 0);
-
-       WREG32(mmTPC6_QM_GLBL_CFG1, 0);
-       WREG32(mmTPC6_CMDQ_GLBL_CFG1, 0);
-
-       WREG32(mmTPC7_QM_GLBL_CFG1, 0);
-       WREG32(mmTPC7_CMDQ_GLBL_CFG1, 0);
-}
-
 static void goya_dma_stall(struct hl_device *hdev)
 {
        WREG32(mmDMA_QM_0_GLBL_CFG1, 1 << DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT);
@@ -2905,20 +2866,6 @@ int goya_suspend(struct hl_device *hdev)
 {
        int rc;
 
-       rc = goya_stop_internal_queues(hdev);
-
-       if (rc) {
-               dev_err(hdev->dev, "failed to stop internal queues\n");
-               return rc;
-       }
-
-       rc = goya_stop_external_queues(hdev);
-
-       if (rc) {
-               dev_err(hdev->dev, "failed to stop external queues\n");
-               return rc;
-       }
-
        rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
        if (rc)
                dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");
@@ -2928,15 +2875,7 @@ int goya_suspend(struct hl_device *hdev)
 
 int goya_resume(struct hl_device *hdev)
 {
-       int rc;
-
-       goya_resume_external_queues(hdev);
-       goya_resume_internal_queues(hdev);
-
-       rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_ENABLE_PCI_ACCESS);
-       if (rc)
-               dev_err(hdev->dev, "Failed to enable PCI access from CPU\n");
-       return rc;
+       return goya_init_iatu(hdev);
 }
 
 static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
@@ -3070,7 +3009,7 @@ void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id,
 
        *dma_handle = hdev->asic_prop.sram_base_address;
 
-       base = hdev->pcie_bar[SRAM_CFG_BAR_ID];
+       base = (void *) hdev->pcie_bar[SRAM_CFG_BAR_ID];
 
        switch (queue_id) {
        case GOYA_QUEUE_ID_MME:
index a7c95e9f9b9a8808efa70651e66c34625ac82d0a..a8ee52c880cd800651681b866048126b2e9fc478 100644 (file)
@@ -793,11 +793,11 @@ struct hl_vm_hash_node {
  * struct hl_vm_phys_pg_pack - physical page pack.
  * @vm_type: describes the type of the virtual area descriptor.
  * @pages: the physical page array.
+ * @npages: number of physical pages in the pack.
+ * @total_size: total size of all the pages in this list.
  * @mapping_cnt: number of shared mappings.
  * @asid: the context related to this list.
- * @npages: num physical pages in the pack.
  * @page_size: size of each page in the pack.
- * @total_size: total size of all the pages in this list.
  * @flags: HL_MEM_* flags related to this list.
  * @handle: the provided handle related to this list.
  * @offset: offset from the first page.
@@ -807,11 +807,11 @@ struct hl_vm_hash_node {
 struct hl_vm_phys_pg_pack {
        enum vm_type_t          vm_type; /* must be first */
        u64                     *pages;
+       u64                     npages;
+       u64                     total_size;
        atomic_t                mapping_cnt;
        u32                     asid;
-       u32                     npages;
        u32                     page_size;
-       u32                     total_size;
        u32                     flags;
        u32                     handle;
        u32                     offset;
@@ -1056,13 +1056,15 @@ struct hl_device_reset_work {
  * @cb_pool_lock: protects the CB pool.
  * @user_ctx: current user context executing.
  * @dram_used_mem: current DRAM memory consumption.
- * @in_reset: is device in reset flow.
- * @curr_pll_profile: current PLL profile.
- * @fd_open_cnt: number of open user processes.
  * @timeout_jiffies: device CS timeout value.
  * @max_power: the max power of the device, as configured by the sysadmin. This
  *             value is saved so in case of hard-reset, KMD will restore this
  *             value and update the F/W after the re-initialization
+ * @in_reset: is device in reset flow.
+ * @curr_pll_profile: current PLL profile.
+ * @fd_open_cnt: number of open user processes.
+ * @cs_active_cnt: number of active command submissions on this device (active
+ *                 means already in H/W queues)
  * @major: habanalabs KMD major.
  * @high_pll: high PLL profile frequency.
  * @soft_reset_cnt: number of soft reset since KMD loading.
@@ -1128,11 +1130,12 @@ struct hl_device {
        struct hl_ctx                   *user_ctx;
 
        atomic64_t                      dram_used_mem;
+       u64                             timeout_jiffies;
+       u64                             max_power;
        atomic_t                        in_reset;
        atomic_t                        curr_pll_profile;
        atomic_t                        fd_open_cnt;
-       u64                             timeout_jiffies;
-       u64                             max_power;
+       atomic_t                        cs_active_cnt;
        u32                             major;
        u32                             high_pll;
        u32                             soft_reset_cnt;
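Besides keeping the kernel-doc order in sync with the field order, grouping the two u64 fields ahead of the atomics plausibly tightens the struct layout: on 64-bit kernels atomic_t is 4 bytes while u64 wants 8-byte alignment, so interleaving them leaves padding holes. A sketch of the effect, assuming those sizes:

struct interleaved {            /* 32 bytes */
        atomic_t a;             /* 4 bytes + 4 bytes of padding */
        u64 b;
        atomic_t c;             /* 4 bytes + 4 bytes of padding */
        u64 d;
};

struct grouped {                /* 24 bytes */
        u64 b;
        u64 d;
        atomic_t a;
        atomic_t c;
};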
index 67bece26417cbe930fa018abdb33c88ba8618b23..ef3bb695136025971c76b916a97dde8a4b36905b 100644 (file)
@@ -370,12 +370,13 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
                spin_unlock(&hdev->hw_queues_mirror_lock);
        }
 
-       list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) {
+       atomic_inc(&hdev->cs_active_cnt);
+
+       list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
                if (job->ext_queue)
                        ext_hw_queue_schedule_job(job);
                else
                        int_hw_queue_schedule_job(job);
-       }
 
        cs->submitted = true;
 
index 3a12fd1a5274479e89406947991fd709203e6726..ce1fda40a8b8112572b9a26db139c8aa6de76f8e 100644 (file)
@@ -56,9 +56,9 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
        struct hl_device *hdev = ctx->hdev;
        struct hl_vm *vm = &hdev->vm;
        struct hl_vm_phys_pg_pack *phys_pg_pack;
-       u64 paddr = 0;
-       u32 total_size, num_pgs, num_curr_pgs, page_size, page_shift;
-       int handle, rc, i;
+       u64 paddr = 0, total_size, num_pgs, i;
+       u32 num_curr_pgs, page_size, page_shift;
+       int handle, rc;
        bool contiguous;
 
        num_curr_pgs = 0;
@@ -73,7 +73,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
                paddr = (u64) gen_pool_alloc(vm->dram_pg_pool, total_size);
                if (!paddr) {
                        dev_err(hdev->dev,
-                               "failed to allocate %u huge contiguous pages\n",
+                               "failed to allocate %llu huge contiguous pages\n",
                                num_pgs);
                        return -ENOMEM;
                }
@@ -93,7 +93,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
        phys_pg_pack->flags = args->flags;
        phys_pg_pack->contiguous = contiguous;
 
-       phys_pg_pack->pages = kcalloc(num_pgs, sizeof(u64), GFP_KERNEL);
+       phys_pg_pack->pages = kvmalloc_array(num_pgs, sizeof(u64), GFP_KERNEL);
        if (!phys_pg_pack->pages) {
                rc = -ENOMEM;
                goto pages_arr_err;
@@ -148,7 +148,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
                        gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[i],
                                        page_size);
 
-       kfree(phys_pg_pack->pages);
+       kvfree(phys_pg_pack->pages);
 pages_arr_err:
        kfree(phys_pg_pack);
 pages_pack_err:
@@ -267,7 +267,7 @@ static void free_phys_pg_pack(struct hl_device *hdev,
                struct hl_vm_phys_pg_pack *phys_pg_pack)
 {
        struct hl_vm *vm = &hdev->vm;
-       int i;
+       u64 i;
 
        if (!phys_pg_pack->created_from_userptr) {
                if (phys_pg_pack->contiguous) {
@@ -288,7 +288,7 @@ static void free_phys_pg_pack(struct hl_device *hdev,
                }
        }
 
-       kfree(phys_pg_pack->pages);
+       kvfree(phys_pg_pack->pages);
        kfree(phys_pg_pack);
 }
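The kcalloc()/kfree() to kvmalloc_array()/kvfree() conversion above (and in the hunks below) lets these large page arrays fall back to vmalloc when physically contiguous memory is scarce; kvfree() frees either kind. One caveat worth noting: unlike kcalloc(), kvmalloc_array() does not zero the buffer unless __GFP_ZERO is passed, so the caller must fill every entry before use. A minimal sketch, with alloc_one_page() as a hypothetical helper:

        pages = kvmalloc_array(num_pgs, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;
        for (i = 0 ; i < num_pgs ; i++)
                pages[i] = alloc_one_page();
        /* ... */
        kvfree(pages);  /* handles kmalloc- and vmalloc-backed memory alike */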
 
@@ -519,7 +519,7 @@ static inline int add_va_block(struct hl_device *hdev,
  * - Return the start address of the virtual block
  */
 static u64 get_va_block(struct hl_device *hdev,
-               struct hl_va_range *va_range, u32 size, u64 hint_addr,
+               struct hl_va_range *va_range, u64 size, u64 hint_addr,
                bool is_userptr)
 {
        struct hl_vm_va_block *va_block, *new_va_block = NULL;
@@ -577,7 +577,8 @@ static u64 get_va_block(struct hl_device *hdev,
        }
 
        if (!new_va_block) {
-               dev_err(hdev->dev, "no available va block for size %u\n", size);
+               dev_err(hdev->dev, "no available va block for size %llu\n",
+                               size);
                goto out;
        }
 
@@ -648,8 +649,8 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
        struct hl_vm_phys_pg_pack *phys_pg_pack;
        struct scatterlist *sg;
        dma_addr_t dma_addr;
-       u64 page_mask;
-       u32 npages, total_npages, page_size = PAGE_SIZE;
+       u64 page_mask, total_npages;
+       u32 npages, page_size = PAGE_SIZE;
        bool first = true, is_huge_page_opt = true;
        int rc, i, j;
 
@@ -691,7 +692,8 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
 
        page_mask = ~(((u64) page_size) - 1);
 
-       phys_pg_pack->pages = kcalloc(total_npages, sizeof(u64), GFP_KERNEL);
+       phys_pg_pack->pages = kvmalloc_array(total_npages, sizeof(u64),
+                                               GFP_KERNEL);
        if (!phys_pg_pack->pages) {
                rc = -ENOMEM;
                goto page_pack_arr_mem_err;
@@ -750,9 +752,9 @@ static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr,
                struct hl_vm_phys_pg_pack *phys_pg_pack)
 {
        struct hl_device *hdev = ctx->hdev;
-       u64 next_vaddr = vaddr, paddr;
+       u64 next_vaddr = vaddr, paddr, mapped_pg_cnt = 0, i;
        u32 page_size = phys_pg_pack->page_size;
-       int i, rc = 0, mapped_pg_cnt = 0;
+       int rc = 0;
 
        for (i = 0 ; i < phys_pg_pack->npages ; i++) {
                paddr = phys_pg_pack->pages[i];
@@ -764,7 +766,7 @@ static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr,
                rc = hl_mmu_map(ctx, next_vaddr, paddr, page_size);
                if (rc) {
                        dev_err(hdev->dev,
-                               "map failed for handle %u, npages: %d, mapped: %d",
+                               "map failed for handle %u, npages: %llu, mapped: %llu",
                                phys_pg_pack->handle, phys_pg_pack->npages,
                                mapped_pg_cnt);
                        goto err;
@@ -985,10 +987,10 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
        struct hl_vm_hash_node *hnode = NULL;
        struct hl_userptr *userptr = NULL;
        enum vm_type_t *vm_type;
-       u64 next_vaddr;
+       u64 next_vaddr, i;
        u32 page_size;
        bool is_userptr;
-       int i, rc;
+       int rc;
 
        /* protect from double entrance */
        mutex_lock(&ctx->mem_hash_lock);
index 2f2e99cb27439433bd4527350b2347a6856cab5d..3a5a2cec83051b08c1b838372aaf29c0f1b99e13 100644 (file)
@@ -832,7 +832,7 @@ static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
 int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
 {
        struct hl_device *hdev = ctx->hdev;
-       u64 real_virt_addr;
+       u64 real_virt_addr, real_phys_addr;
        u32 real_page_size, npages;
        int i, rc, mapped_cnt = 0;
 
@@ -857,14 +857,16 @@ int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
 
        npages = page_size / real_page_size;
        real_virt_addr = virt_addr;
+       real_phys_addr = phys_addr;
 
        for (i = 0 ; i < npages ; i++) {
-               rc = _hl_mmu_map(ctx, real_virt_addr, phys_addr,
+               rc = _hl_mmu_map(ctx, real_virt_addr, real_phys_addr,
                                real_page_size);
                if (rc)
                        goto err;
 
                real_virt_addr += real_page_size;
+               real_phys_addr += real_page_size;
                mapped_cnt++;
        }
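The hunk above fixes mappings whose page_size exceeds the MMU's real_page_size: the loop advanced only the virtual cursor, so every hardware-sized chunk was mapped to the same physical address. Both cursors have to move in lockstep, roughly (map_one() standing in for the real helper):

        for (i = 0 ; i < npages ; i++) {
                map_one(ctx, real_virt_addr, real_phys_addr, real_page_size);
                real_virt_addr += real_page_size;
                real_phys_addr += real_page_size;  /* the previously missing advance */
        }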
 
index 82a97866e0cf4c857cbb25c5487a3c6f80addfa0..7c8f203f9a24d38bbd1c2b870644e511a1db76d3 100644 (file)
@@ -48,7 +48,6 @@ struct alcor_sdmmc_host {
        struct mmc_command *cmd;
        struct mmc_data *data;
        unsigned int dma_on:1;
-       unsigned int early_data:1;
 
        struct mutex cmd_mutex;
 
@@ -144,8 +143,7 @@ static void alcor_data_set_dma(struct alcor_sdmmc_host *host)
        host->sg_count--;
 }
 
-static void alcor_trigger_data_transfer(struct alcor_sdmmc_host *host,
-                                       bool early)
+static void alcor_trigger_data_transfer(struct alcor_sdmmc_host *host)
 {
        struct alcor_pci_priv *priv = host->alcor_pci;
        struct mmc_data *data = host->data;
@@ -155,13 +153,6 @@ static void alcor_trigger_data_transfer(struct alcor_sdmmc_host *host,
                ctrl |= AU6601_DATA_WRITE;
 
        if (data->host_cookie == COOKIE_MAPPED) {
-               if (host->early_data) {
-                       host->early_data = false;
-                       return;
-               }
-
-               host->early_data = early;
-
                alcor_data_set_dma(host);
                ctrl |= AU6601_DATA_DMA_MODE;
                host->dma_on = 1;
@@ -231,6 +222,7 @@ static void alcor_prepare_sg_miter(struct alcor_sdmmc_host *host)
 static void alcor_prepare_data(struct alcor_sdmmc_host *host,
                               struct mmc_command *cmd)
 {
+       struct alcor_pci_priv *priv = host->alcor_pci;
        struct mmc_data *data = cmd->data;
 
        if (!data)
@@ -248,7 +240,7 @@ static void alcor_prepare_data(struct alcor_sdmmc_host *host,
        if (data->host_cookie != COOKIE_MAPPED)
                alcor_prepare_sg_miter(host);
 
-       alcor_trigger_data_transfer(host, true);
+       alcor_write8(priv, 0, AU6601_DATA_XFER_CTRL);
 }
 
 static void alcor_send_cmd(struct alcor_sdmmc_host *host,
@@ -435,7 +427,7 @@ static int alcor_cmd_irq_done(struct alcor_sdmmc_host *host, u32 intmask)
        if (!host->data)
                return false;
 
-       alcor_trigger_data_transfer(host, false);
+       alcor_trigger_data_transfer(host);
        host->cmd = NULL;
        return true;
 }
@@ -456,7 +448,7 @@ static void alcor_cmd_irq_thread(struct alcor_sdmmc_host *host, u32 intmask)
        if (!host->data)
                alcor_request_complete(host, 1);
        else
-               alcor_trigger_data_transfer(host, false);
+               alcor_trigger_data_transfer(host);
        host->cmd = NULL;
 }
 
@@ -487,15 +479,9 @@ static int alcor_data_irq_done(struct alcor_sdmmc_host *host, u32 intmask)
                break;
        case AU6601_INT_READ_BUF_RDY:
                alcor_trf_block_pio(host, true);
-               if (!host->blocks)
-                       break;
-               alcor_trigger_data_transfer(host, false);
                return 1;
        case AU6601_INT_WRITE_BUF_RDY:
                alcor_trf_block_pio(host, false);
-               if (!host->blocks)
-                       break;
-               alcor_trigger_data_transfer(host, false);
                return 1;
        case AU6601_INT_DMA_END:
                if (!host->sg_count)
@@ -508,8 +494,14 @@ static int alcor_data_irq_done(struct alcor_sdmmc_host *host, u32 intmask)
                break;
        }
 
-       if (intmask & AU6601_INT_DATA_END)
-               return 0;
+       if (intmask & AU6601_INT_DATA_END) {
+               if (!host->dma_on && host->blocks) {
+                       alcor_trigger_data_transfer(host);
+                       return 1;
+               } else {
+                       return 0;
+               }
+       }
 
        return 1;
 }
index 5bbed477c9b1ee6546f066e55fd9946ed4a160b6..9f20fff9781b0791ea36c1bd3cd657dd1b50e8c6 100644 (file)
@@ -797,6 +797,43 @@ void sdhci_omap_reset(struct sdhci_host *host, u8 mask)
        sdhci_reset(host, mask);
 }
 
+#define CMD_ERR_MASK (SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX |\
+                     SDHCI_INT_TIMEOUT)
+#define CMD_MASK (CMD_ERR_MASK | SDHCI_INT_RESPONSE)
+
+static u32 sdhci_omap_irq(struct sdhci_host *host, u32 intmask)
+{
+       struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+       struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
+
+       if (omap_host->is_tuning && host->cmd && !host->data_early &&
+           (intmask & CMD_ERR_MASK)) {
+
+               /*
+                * Since we are not resetting data lines during tuning
+                * operation, data error or data complete interrupts
+                * might still arrive. Mark this request as a failure
+                * but still wait for the data interrupt
+                */
+               if (intmask & SDHCI_INT_TIMEOUT)
+                       host->cmd->error = -ETIMEDOUT;
+               else
+                       host->cmd->error = -EILSEQ;
+
+               host->cmd = NULL;
+
+               /*
+                * Sometimes command error interrupts and command complete
+                * interrupt will arrive together. Clear all command related
+                * interrupts here.
+                */
+               sdhci_writel(host, intmask & CMD_MASK, SDHCI_INT_STATUS);
+               intmask &= ~CMD_MASK;
+       }
+
+       return intmask;
+}
+
 static struct sdhci_ops sdhci_omap_ops = {
        .set_clock = sdhci_omap_set_clock,
        .set_power = sdhci_omap_set_power,
@@ -807,6 +844,7 @@ static struct sdhci_ops sdhci_omap_ops = {
        .platform_send_init_74_clocks = sdhci_omap_init_74_clocks,
        .reset = sdhci_omap_reset,
        .set_uhs_signaling = sdhci_omap_set_uhs_signaling,
+       .irq = sdhci_omap_irq,
 };
 
 static int sdhci_omap_set_capabilities(struct sdhci_omap_host *omap_host)
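The new ->irq hook is the sdhci core's way of letting a platform driver intercept interrupt status bits before generic processing: the callback may consume and clear bits itself and returns the mask of bits the core should still handle, exactly as sdhci_omap_irq() does for command errors during tuning. The shape of the contract, sketched with a hypothetical private bit:

static u32 my_sdhci_irq(struct sdhci_host *host, u32 intmask)
{
        if (intmask & MY_PRIVATE_INT) {                 /* hypothetical bit */
                sdhci_writel(host, MY_PRIVATE_INT, SDHCI_INT_STATUS);
                intmask &= ~MY_PRIVATE_INT;             /* hide it from the core */
        }
        return intmask;                                 /* core handles the rest */
}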
index 72428b6bfc474ba6d757b94d79ff62804cc7c8ec..7b7286b4d81ef660d22a9ca93e5f0f2870ea5666 100644 (file)
@@ -1876,7 +1876,11 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
                        continue;
                }
 
-               if (time_after(jiffies, timeo) && !chip_ready(map, adr))
+               /*
+                * We check "time_after" and "!chip_good" before checking "chip_good" to avoid
+                * the failure due to scheduling.
+                */
+               if (time_after(jiffies, timeo) && !chip_good(map, adr, datum))
                        break;
 
                if (chip_good(map, adr, datum)) {
index 3d27616d9c85540304a8d78c4a2f050c0866b9a2..51cf5eca9c7f8de290d5498ef0b9a7f94190004c 100644 (file)
@@ -116,11 +116,15 @@ static struct net_device * __init ipddp_init(void)
  */
 static netdev_tx_t ipddp_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-       __be32 paddr = skb_rtable(skb)->rt_gateway;
+        struct rtable *rtable = skb_rtable(skb);
+        __be32 paddr = 0;
         struct ddpehdr *ddp;
         struct ipddp_route *rt;
         struct atalk_addr *our_addr;
 
+       if (rtable->rt_gw_family == AF_INET)
+               paddr = rtable->rt_gw4;
+
        spin_lock(&ipddp_route_lock);
 
        /*
index 8ddbada9e281931447c3eb96fb784fb1776915b2..062fa7e3af4c267f68aa52cfeaa762994459cc41 100644 (file)
@@ -3213,8 +3213,12 @@ static int bond_netdev_event(struct notifier_block *this,
                return NOTIFY_DONE;
 
        if (event_dev->flags & IFF_MASTER) {
+               int ret;
+
                netdev_dbg(event_dev, "IFF_MASTER\n");
-               return bond_master_netdev_event(event, event_dev);
+               ret = bond_master_netdev_event(event, event_dev);
+               if (ret != NOTIFY_DONE)
+                       return ret;
        }
 
        if (event_dev->flags & IFF_SLAVE) {
index 2f120b2ffef0cfd7d97f6a901f9552fcc58288df..4985268e227330045e1cc0a6f3dadcb81e4c2830 100644 (file)
@@ -55,7 +55,9 @@ static SLAVE_ATTR_RO(link_failure_count);
 
 static ssize_t perm_hwaddr_show(struct slave *slave, char *buf)
 {
-       return sprintf(buf, "%pM\n", slave->perm_hwaddr);
+       return sprintf(buf, "%*phC\n",
+                      slave->dev->addr_len,
+                      slave->perm_hwaddr);
 }
 static SLAVE_ATTR_RO(perm_hwaddr);
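%*phC is the kernel's printf extension for dumping a small buffer as colon-separated hex bytes, taking the length from a preceding int argument. Using it with dev->addr_len instead of the fixed six-byte %pM prints the full permanent address for link types with longer hardware addresses (InfiniBand, for example). For illustration:

        u8 addr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

        pr_info("%*phC\n", (int)sizeof(addr), addr);    /* "00:11:22:33:44:55" */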
 
index f16e1d7d8615d6191a42d158bf274a4e663e94d0..c026d15721f66bf3d03403b1f31d5362ec38bd9c 100644 (file)
@@ -1144,6 +1144,7 @@ static phy_interface_t ksz9477_get_interface(struct ksz_device *dev, int port)
                interface = PHY_INTERFACE_MODE_GMII;
                if (gbit)
                        break;
+               /* fall through */
        case 0:
                interface = PHY_INTERFACE_MODE_MII;
                break;
index dce84a2a65c71eeec36d10fa9ceb6df0a487866a..c44b2822e4dd064e2cba3a9ca1b0af457893e47e 100644 (file)
@@ -427,18 +427,22 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
                return 0;
 
        lane = mv88e6390x_serdes_get_lane(chip, port);
-       if (lane < 0)
+       if (lane < 0 && lane != -ENODEV)
                return lane;
 
-       if (chip->ports[port].serdes_irq) {
-               err = mv88e6390_serdes_irq_disable(chip, port, lane);
+       if (lane >= 0) {
+               if (chip->ports[port].serdes_irq) {
+                       err = mv88e6390_serdes_irq_disable(chip, port, lane);
+                       if (err)
+                               return err;
+               }
+
+               err = mv88e6390x_serdes_power(chip, port, false);
                if (err)
                        return err;
        }
 
-       err = mv88e6390x_serdes_power(chip, port, false);
-       if (err)
-               return err;
+       chip->ports[port].cmode = 0;
 
        if (cmode) {
                err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
@@ -452,6 +456,12 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
                if (err)
                        return err;
 
+               chip->ports[port].cmode = cmode;
+
+               lane = mv88e6390x_serdes_get_lane(chip, port);
+               if (lane < 0)
+                       return lane;
+
                err = mv88e6390x_serdes_power(chip, port, true);
                if (err)
                        return err;
@@ -463,8 +473,6 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
                }
        }
 
-       chip->ports[port].cmode = cmode;
-
        return 0;
 }
 
index 0d15a12a4560b37a4ed21d8e4bdd3bb853adb894..3568129fb7dae08f89a63f7ecc0ce98e165eb83b 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/kernel.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
+#include <linux/ethtool.h>
 #include <linux/init.h>
 #include <linux/moduleparam.h>
 #include <linux/rtnetlink.h>
@@ -131,21 +132,9 @@ static void dummy_get_drvinfo(struct net_device *dev,
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
 }
 
-static int dummy_get_ts_info(struct net_device *dev,
-                             struct ethtool_ts_info *ts_info)
-{
-       ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
-                                  SOF_TIMESTAMPING_RX_SOFTWARE |
-                                  SOF_TIMESTAMPING_SOFTWARE;
-
-       ts_info->phc_index = -1;
-
-       return 0;
-};
-
 static const struct ethtool_ops dummy_ethtool_ops = {
        .get_drvinfo            = dummy_get_drvinfo,
-       .get_ts_info            = dummy_get_ts_info,
+       .get_ts_info            = ethtool_op_get_ts_info,
 };
 
 static void dummy_setup(struct net_device *dev)
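The removed callback duplicated the core helper ethtool_op_get_ts_info(), which reports software-only timestamping and phc_index = -1. Any driver without a PTP hardware clock can plug the helper in directly, as dummy now does; a minimal sketch with a hypothetical drvinfo handler:

static const struct ethtool_ops my_ethtool_ops = {
        .get_drvinfo    = my_get_drvinfo,       /* hypothetical */
        .get_ts_info    = ethtool_op_get_ts_info,
};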
index 71c8cac6e44e4dcc2a95472dd493527e0d4d13df..7e40d14682f7a78a8e4646afde29cf305101c73e 100644 (file)
@@ -2236,7 +2236,7 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
                }
        }
 
-       if (netif_xmit_stopped(txq) || !skb->xmit_more) {
+       if (netif_xmit_stopped(txq) || !netdev_xmit_more()) {
                /* trigger the dma engine. ena_com_write_sq_doorbell()
                 * has a mb
                 */
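This and the matching conversions in the drivers below follow the tree-wide move of the batching hint from skb->xmit_more to per-CPU state queried through netdev_xmit_more(), which is valid only inside ndo_start_xmit. The usual doorbell-batching pattern looks roughly like this, with my_ring_doorbell() as a hypothetical doorbell write:

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct netdev_queue *txq =
                netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

        /* ... post the skb's descriptors to the TX ring ... */

        /* Kick the hardware only for the last packet of a batch, or when
         * the stack stopped the queue so nothing further will follow.
         */
        if (!netdev_xmit_more() || netif_xmit_stopped(txq))
                my_ring_doorbell(dev);

        return NETDEV_TX_OK;
}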
index 4666084eda16a318a4042b81c2d3a03cb7664a19..d5fd49dd25f336aedbe166a8c6bdd74ef6296204 100644 (file)
@@ -1887,7 +1887,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
        smp_wmb();
 
        ring->cur = cur_index + 1;
-       if (!packet->skb->xmit_more ||
+       if (!netdev_xmit_more() ||
            netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
                                                   channel->queue_index)))
                xgbe_tx_start_xmit(channel, ring);
index 6f56276015a47d1069dd2dbcfd6335d7e74dcbb6..f62deeb6e9418859d52c7b379718942f9d92752d 100644 (file)
@@ -404,6 +404,7 @@ static int nb8800_xmit(struct sk_buff *skb, struct net_device *dev)
        unsigned int dma_len;
        unsigned int align;
        unsigned int next;
+       bool xmit_more;
 
        if (atomic_read(&priv->tx_free) <= NB8800_DESC_LOW) {
                netif_stop_queue(dev);
@@ -423,9 +424,10 @@ static int nb8800_xmit(struct sk_buff *skb, struct net_device *dev)
                return NETDEV_TX_OK;
        }
 
+       xmit_more = netdev_xmit_more();
        if (atomic_dec_return(&priv->tx_free) <= NB8800_DESC_LOW) {
                netif_stop_queue(dev);
-               skb->xmit_more = 0;
+               xmit_more = false;
        }
 
        next = priv->tx_next;
@@ -450,7 +452,7 @@ static int nb8800_xmit(struct sk_buff *skb, struct net_device *dev)
        desc->n_addr = priv->tx_bufs[next].dma_desc;
        desc->config = DESC_BTS(2) | DESC_DS | DESC_EOF | dma_len;
 
-       if (!skb->xmit_more)
+       if (!xmit_more)
                desc->config |= DESC_EOC;
 
        txb->skb = skb;
@@ -468,7 +470,7 @@ static int nb8800_xmit(struct sk_buff *skb, struct net_device *dev)
 
        priv->tx_next = next;
 
-       if (!skb->xmit_more) {
+       if (!xmit_more) {
                smp_wmb();
                priv->tx_chain->ready = true;
                priv->tx_chain = NULL;
index 626b491f7674fd7888ef2ba560231c999b2b1744..0d6c98a9e07bfd1ac62eacc4e45ffbb6a4495b91 100644 (file)
@@ -15376,27 +15376,47 @@ static int bnx2x_enable_ptp_packets(struct bnx2x *bp)
        return 0;
 }
 
+#define BNX2X_P2P_DETECT_PARAM_MASK 0x5F5
+#define BNX2X_P2P_DETECT_RULE_MASK 0x3DBB
+#define BNX2X_PTP_TX_ON_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6AA)
+#define BNX2X_PTP_TX_ON_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EEE)
+#define BNX2X_PTP_V1_L4_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x7EE)
+#define BNX2X_PTP_V1_L4_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3FFE)
+#define BNX2X_PTP_V2_L4_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x7EA)
+#define BNX2X_PTP_V2_L4_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3FEE)
+#define BNX2X_PTP_V2_L2_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6BF)
+#define BNX2X_PTP_V2_L2_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EFF)
+#define BNX2X_PTP_V2_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6AA)
+#define BNX2X_PTP_V2_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EEE)
+
 int bnx2x_configure_ptp_filters(struct bnx2x *bp)
 {
        int port = BP_PORT(bp);
+       u32 param, rule;
        int rc;
 
        if (!bp->hwtstamp_ioctl_called)
                return 0;
 
+       param = port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
+               NIG_REG_P0_TLLH_PTP_PARAM_MASK;
+       rule = port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
+               NIG_REG_P0_TLLH_PTP_RULE_MASK;
        switch (bp->tx_type) {
        case HWTSTAMP_TX_ON:
                bp->flags |= TX_TIMESTAMPING_EN;
-               REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
-                      NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x6AA);
-               REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
-                      NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3EEE);
+               REG_WR(bp, param, BNX2X_PTP_TX_ON_PARAM_MASK);
+               REG_WR(bp, rule, BNX2X_PTP_TX_ON_RULE_MASK);
                break;
        case HWTSTAMP_TX_ONESTEP_SYNC:
                BNX2X_ERR("One-step timestamping is not supported\n");
                return -ERANGE;
        }
 
+       param = port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
+               NIG_REG_P0_LLH_PTP_PARAM_MASK;
+       rule = port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
+               NIG_REG_P0_LLH_PTP_RULE_MASK;
        switch (bp->rx_filter) {
        case HWTSTAMP_FILTER_NONE:
                break;
@@ -15410,30 +15430,24 @@ int bnx2x_configure_ptp_filters(struct bnx2x *bp)
        case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
                bp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
                /* Initialize PTP detection for UDP/IPv4 events */
-               REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
-                      NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EE);
-               REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
-                      NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFE);
+               REG_WR(bp, param, BNX2X_PTP_V1_L4_PARAM_MASK);
+               REG_WR(bp, rule, BNX2X_PTP_V1_L4_RULE_MASK);
                break;
        case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
                bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
                /* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */
-               REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
-                      NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EA);
-               REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
-                      NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FEE);
+               REG_WR(bp, param, BNX2X_PTP_V2_L4_PARAM_MASK);
+               REG_WR(bp, rule, BNX2X_PTP_V2_L4_RULE_MASK);
                break;
        case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
                bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
                /* Initialize PTP detection L2 events */
-               REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
-                      NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6BF);
-               REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
-                      NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EFF);
+               REG_WR(bp, param, BNX2X_PTP_V2_L2_PARAM_MASK);
+               REG_WR(bp, rule, BNX2X_PTP_V2_L2_RULE_MASK);
 
                break;
        case HWTSTAMP_FILTER_PTP_V2_EVENT:
@@ -15441,10 +15455,8 @@ int bnx2x_configure_ptp_filters(struct bnx2x *bp)
        case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
                bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
                /* Initialize PTP detection L2, UDP/IPv4 or UDP/IPv6 events */
-               REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
-                      NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6AA);
-               REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
-                      NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EEE);
+               REG_WR(bp, param, BNX2X_PTP_V2_PARAM_MASK);
+               REG_WR(bp, rule, BNX2X_PTP_V2_RULE_MASK);
                break;
        }
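Note that these macros are not straight renames of the old literals: each per-mode value is now AND'ed with a common P2P-detect mask, so the values actually written change. For instance, BNX2X_PTP_TX_ON_PARAM_MASK evaluates to 0x5F5 & 0x6AA = 0x4A0, where the old code wrote 0x6AA; presumably this extends detection to peer-to-peer PTP event packets, as the macro names suggest.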
 
index a9bdc21873d32f31620ac169f8aff5b76cd02f7f..10ff37d6dc783b796c690a4d73bc90caa4cad931 100644 (file)
@@ -957,7 +957,7 @@ int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add)
        bnx2x_sample_bulletin(bp);
 
        if (bp->shadow_bulletin.content.valid_bitmap & 1 << VLAN_VALID) {
-               BNX2X_ERR("Hypervisor will dicline the request, avoiding\n");
+               BNX2X_ERR("Hypervisor will decline the request, avoiding\n");
                rc = -EINVAL;
                goto out;
        }
index 35e34e23ba33fb397cc4496ae4ff7025b0b9c918..6528a597367bd4ba7862b13964dd5960b685a165 100644 (file)
@@ -551,7 +551,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
        prod = NEXT_TX(prod);
        txr->tx_prod = prod;
 
-       if (!skb->xmit_more || netif_xmit_stopped(txq))
+       if (!netdev_xmit_more() || netif_xmit_stopped(txq))
                bnxt_db_write(bp, &txr->tx_db, prod);
 
 tx_done:
@@ -559,7 +559,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
        mmiowb();
 
        if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
-               if (skb->xmit_more && !tx_buf->is_push)
+               if (netdev_xmit_more() && !tx_buf->is_push)
                        bnxt_db_write(bp, &txr->tx_db, prod);
 
                netif_tx_stop_queue(txq);
@@ -1133,6 +1133,8 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
        tpa_info = &rxr->rx_tpa[agg_id];
 
        if (unlikely(cons != rxr->rx_next_cons)) {
+               netdev_warn(bp->dev, "TPA cons %x != expected cons %x\n",
+                           cons, rxr->rx_next_cons);
                bnxt_sched_reset(bp, rxr);
                return;
        }
@@ -1585,15 +1587,17 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
        }
 
        cons = rxcmp->rx_cmp_opaque;
-       rx_buf = &rxr->rx_buf_ring[cons];
-       data = rx_buf->data;
-       data_ptr = rx_buf->data_ptr;
        if (unlikely(cons != rxr->rx_next_cons)) {
                int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);
 
+               netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
+                           cons, rxr->rx_next_cons);
                bnxt_sched_reset(bp, rxr);
                return rc1;
        }
+       rx_buf = &rxr->rx_buf_ring[cons];
+       data = rx_buf->data;
+       data_ptr = rx_buf->data_ptr;
        prefetch(data_ptr);
 
        misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
@@ -1610,11 +1614,17 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 
        rx_buf->data = NULL;
        if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
+               u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
+
                bnxt_reuse_rx_data(rxr, cons, data);
                if (agg_bufs)
                        bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
 
                rc = -EIO;
+               if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
+                       netdev_warn(bp->dev, "RX buffer error %x\n", rx_err);
+                       bnxt_sched_reset(bp, rxr);
+               }
                goto next_rx;
        }
 
@@ -10104,7 +10114,6 @@ static const struct net_device_ops bnxt_netdev_ops = {
        .ndo_bpf                = bnxt_xdp,
        .ndo_bridge_getlink     = bnxt_bridge_getlink,
        .ndo_bridge_setlink     = bnxt_bridge_setlink,
-       .ndo_get_port_parent_id = bnxt_get_port_parent_id,
        .ndo_get_devlink_port   = bnxt_get_devlink_port,
 };
 
@@ -10429,6 +10438,26 @@ static int bnxt_init_mac_addr(struct bnxt *bp)
        return rc;
 }
 
+static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
+{
+       struct pci_dev *pdev = bp->pdev;
+       int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
+       u32 dw;
+
+       if (!pos) {
+               netdev_info(bp->dev, "Unable to read adapter's DSN");
+               return -EOPNOTSUPP;
+       }
+
+       /* DSN (two dw) is at an offset of 4 from the cap pos */
+       pos += 4;
+       pci_read_config_dword(pdev, pos, &dw);
+       put_unaligned_le32(dw, &dsn[0]);
+       pci_read_config_dword(pdev, pos + 4, &dw);
+       put_unaligned_le32(dw, &dsn[4]);
+       return 0;
+}
+
 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
        static int version_printed;
@@ -10569,6 +10598,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto init_err_pci_clean;
        }
 
+       /* Read the adapter's DSN to use as the eswitch switch_id */
+       rc = bnxt_pcie_dsn_get(bp, bp->switch_id);
+       if (rc)
+               goto init_err_pci_clean;
+
        bnxt_hwrm_func_qcfg(bp);
        bnxt_hwrm_vnic_qcaps(bp);
        bnxt_hwrm_port_led_qcaps(bp);
index ab6fd05c462bd975e0e0e38c5c7f0408d46eb277..549c90d3e465f2fd4f7eb494cee47373b953aaf5 100644 (file)
@@ -230,7 +230,8 @@ int bnxt_dl_register(struct bnxt *bp)
        }
 
        devlink_port_attrs_set(&bp->dl_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
-                              bp->pf.port_id, false, 0);
+                              bp->pf.port_id, false, 0,
+                              bp->switch_id, sizeof(bp->switch_id));
        rc = devlink_port_register(dl, &bp->dl_port, bp->pf.port_id);
        if (rc) {
                netdev_err(bp->dev, "devlink_port_register failed");
index 2bdd2da9aac7d5777a111b616cf2ecf178ecbe1c..f760921389a304214f854a4604f6233e6eafbfe5 100644 (file)
@@ -406,26 +406,6 @@ static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
        dev->min_mtu = ETH_ZLEN;
 }
 
-static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
-{
-       struct pci_dev *pdev = bp->pdev;
-       int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
-       u32 dw;
-
-       if (!pos) {
-               netdev_info(bp->dev, "Unable do read adapter's DSN");
-               return -EOPNOTSUPP;
-       }
-
-       /* DSN (two dw) is at an offset of 4 from the cap pos */
-       pos += 4;
-       pci_read_config_dword(pdev, pos, &dw);
-       put_unaligned_le32(dw, &dsn[0]);
-       pci_read_config_dword(pdev, pos + 4, &dw);
-       put_unaligned_le32(dw, &dsn[4]);
-       return 0;
-}
-
 static int bnxt_vf_reps_create(struct bnxt *bp)
 {
        u16 *cfa_code_map = NULL, num_vfs = pci_num_vf(bp->pdev);
@@ -490,11 +470,6 @@ static int bnxt_vf_reps_create(struct bnxt *bp)
                }
        }
 
-       /* Read the adapter's DSN to use as the eswitch switch_id */
-       rc = bnxt_pcie_dsn_get(bp, bp->switch_id);
-       if (rc)
-               goto err;
-
        /* publish cfa_code_map only after all VF-reps have been initialized */
        bp->cfa_code_map = cfa_code_map;
        bp->eswitch_mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
index 983245c0867c268be95b1ae52f57a2c0b4051507..4fd973571e4cd245d819c75f345de737d0fb4269 100644 (file)
@@ -1665,7 +1665,7 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
        if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
                netif_tx_stop_queue(txq);
 
-       if (!skb->xmit_more || netif_xmit_stopped(txq))
+       if (!netdev_xmit_more() || netif_xmit_stopped(txq))
                /* Packets are ready, update producer index */
                bcmgenet_tdma_ring_writel(priv, ring->index,
                                          ring->prod_index, TDMA_PROD_INDEX);
index 328373e0578ff83bf5d5bb336103deba33144e99..664fedf0cd802bf287436e2ffd6eb187d427cad3 100644 (file)
@@ -4283,7 +4283,7 @@ static void tg3_power_down(struct tg3 *tp)
        pci_set_power_state(tp->pdev, PCI_D3hot);
 }
 
-static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
+static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex)
 {
        switch (val & MII_TG3_AUX_STAT_SPDMASK) {
        case MII_TG3_AUX_STAT_10HALF:
@@ -4787,7 +4787,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
        bool current_link_up;
        u32 bmsr, val;
        u32 lcl_adv, rmt_adv;
-       u16 current_speed;
+       u32 current_speed;
        u8 current_duplex;
        int i, err;
 
@@ -5719,7 +5719,7 @@ static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
 {
        u32 orig_pause_cfg;
-       u16 orig_active_speed;
+       u32 orig_active_speed;
        u8 orig_active_duplex;
        u32 mac_status;
        bool current_link_up;
@@ -5823,7 +5823,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
 {
        int err = 0;
        u32 bmsr, bmcr;
-       u16 current_speed = SPEED_UNKNOWN;
+       u32 current_speed = SPEED_UNKNOWN;
        u8 current_duplex = DUPLEX_UNKNOWN;
        bool current_link_up = false;
        u32 local_adv, remote_adv, sgsr;
@@ -8156,7 +8156,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        netif_tx_wake_queue(txq);
        }
 
-       if (!skb->xmit_more || netif_xmit_stopped(txq)) {
+       if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
                /* Packets are ready, update Tx producer idx on card. */
                tw32_tx_mbox(tnapi->prodmbox, entry);
                mmiowb();
@@ -12763,9 +12763,6 @@ static int tg3_set_phys_id(struct net_device *dev,
 {
        struct tg3 *tp = netdev_priv(dev);
 
-       if (!netif_running(tp->dev))
-               return -EAGAIN;
-
        switch (state) {
        case ETHTOOL_ID_ACTIVE:
                return 1;       /* cycle on/off once per second */
index a772a33b685c5eb8c28137107eb33cb4b6ffeb1d..6953d0546acb320196887a51d07312977d4f49ed 100644 (file)
@@ -2873,7 +2873,7 @@ struct tg3_tx_ring_info {
 struct tg3_link_config {
        /* Describes what we're trying to get. */
        u32                             advertising;
-       u16                             speed;
+       u32                             speed;
        u8                              duplex;
        u8                              autoneg;
        u8                              flowctrl;
@@ -2882,7 +2882,7 @@ struct tg3_link_config {
        u8                              active_flowctrl;
 
        u8                              active_duplex;
-       u16                             active_speed;
+       u32                             active_speed;
        u32                             rmt_adv;
 };
 
index a44171fddf47b03582327de049cff998c0b45d18..009ed4c1baf300cff7e9dbe71be7332658169305 100644 (file)
@@ -898,7 +898,9 @@ static void macb_tx_interrupt(struct macb_queue *queue)
 
                        /* First, update TX stats if needed */
                        if (skb) {
-                               if (gem_ptp_do_txstamp(queue, skb, desc) == 0) {
+                               if (unlikely(skb_shinfo(skb)->tx_flags &
+                                            SKBTX_HW_TSTAMP) &&
+                                   gem_ptp_do_txstamp(queue, skb, desc) == 0) {
                                        /* skb now belongs to timestamp buffer
                                         * and will be removed later
                                         */
index fb6f813cff65dcb7f608d4e1b859be9f38a6b13d..eab805579f9646b42a30e71757e3f6d6432ba7e2 100644 (file)
@@ -2522,7 +2522,7 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
                irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
        }
 
-       xmit_more = skb->xmit_more;
+       xmit_more = netdev_xmit_more();
 
        if (unlikely(cmdsetup.s.timestamp))
                status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
index 54b245797d2e61e1844a25c9674255c2dd80e69c..db0b90555acbcb368626757b803474710d135082 100644 (file)
@@ -1585,7 +1585,7 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
                irh->vlan = skb_vlan_tag_get(skb) & VLAN_VID_MASK;
        }
 
-       xmit_more = skb->xmit_more;
+       xmit_more = netdev_xmit_more();
 
        if (unlikely(cmdsetup.s.timestamp))
                status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
index aa2be480719134f720e9487a3c71b4272cc8efe3..c032bef1b776d74ea4886e8fbddca40c8b7dd868 100644 (file)
 #define DRV_NAME       "nicvf"
 #define DRV_VERSION    "1.0"
 
+/* NOTE: Packets bigger than 1530 are split across multiple pages and XDP needs
+ * the buffer to be contiguous. Allow XDP to be set up only if we don't exceed
+ * this value, keeping headroom for the 14 byte Ethernet header and two
+ * VLAN tags (for QinQ)
+ */
+#define MAX_XDP_MTU    (1530 - ETH_HLEN - VLAN_HLEN * 2)
+
 /* Supported devices */
 static const struct pci_device_id nicvf_id_table[] = {
        { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
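For reference, with ETH_HLEN = 14 and VLAN_HLEN = 4 the new limit works out to MAX_XDP_MTU = 1530 - 14 - 2 * 4 = 1508, so a standard 1500-byte MTU still passes while jumbo frames are refused whenever an XDP program is attached.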
@@ -1328,10 +1335,11 @@ int nicvf_stop(struct net_device *netdev)
        struct nicvf_cq_poll *cq_poll = NULL;
        union nic_mbx mbx = {};
 
-       cancel_delayed_work_sync(&nic->link_change_work);
-
        /* wait till all queued set_rx_mode tasks completes */
-       drain_workqueue(nic->nicvf_rx_mode_wq);
+       if (nic->nicvf_rx_mode_wq) {
+               cancel_delayed_work_sync(&nic->link_change_work);
+               drain_workqueue(nic->nicvf_rx_mode_wq);
+       }
 
        mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
        nicvf_send_msg_to_pf(nic, &mbx);
@@ -1452,7 +1460,8 @@ int nicvf_open(struct net_device *netdev)
        struct nicvf_cq_poll *cq_poll = NULL;
 
        /* wait till all queued set_rx_mode tasks completes if any */
-       drain_workqueue(nic->nicvf_rx_mode_wq);
+       if (nic->nicvf_rx_mode_wq)
+               drain_workqueue(nic->nicvf_rx_mode_wq);
 
        netif_carrier_off(netdev);
 
@@ -1550,10 +1559,12 @@ int nicvf_open(struct net_device *netdev)
        /* Send VF config done msg to PF */
        nicvf_send_cfg_done(nic);
 
-       INIT_DELAYED_WORK(&nic->link_change_work,
-                         nicvf_link_status_check_task);
-       queue_delayed_work(nic->nicvf_rx_mode_wq,
-                          &nic->link_change_work, 0);
+       if (nic->nicvf_rx_mode_wq) {
+               INIT_DELAYED_WORK(&nic->link_change_work,
+                                 nicvf_link_status_check_task);
+               queue_delayed_work(nic->nicvf_rx_mode_wq,
+                                  &nic->link_change_work, 0);
+       }
 
        return 0;
 cleanup:
@@ -1578,6 +1589,15 @@ static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
        struct nicvf *nic = netdev_priv(netdev);
        int orig_mtu = netdev->mtu;
 
+       /* For now just support only the usual MTU sized frames,
+        * plus some headroom for VLAN, QinQ.
+        */
+       if (nic->xdp_prog && new_mtu > MAX_XDP_MTU) {
+               netdev_warn(netdev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
+                           netdev->mtu);
+               return -EINVAL;
+       }
+
        netdev->mtu = new_mtu;
 
        if (!netif_running(netdev))
@@ -1826,8 +1846,10 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
        bool bpf_attached = false;
        int ret = 0;
 
-       /* For now just support only the usual MTU sized frames */
-       if (prog && (dev->mtu > 1500)) {
+       /* For now just support only the usual MTU sized frames,
+        * plus some headroom for VLAN, QinQ.
+        */
+       if (prog && dev->mtu > MAX_XDP_MTU) {
                netdev_warn(dev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
                            dev->mtu);
                return -EOPNOTSUPP;
index 5b4d3badcb730b1417739d508bae8b3838afaaf9..e246f9733bb89161ceb2c6d39fcbe330b469ca61 100644 (file)
@@ -105,20 +105,19 @@ static inline struct pgcache *nicvf_alloc_page(struct nicvf *nic,
        /* Check if page can be recycled */
        if (page) {
                ref_count = page_ref_count(page);
-               /* Check if this page has been used once i.e 'put_page'
-                * called after packet transmission i.e internal ref_count
-                * and page's ref_count are equal i.e page can be recycled.
+               /* This page can be recycled if internal ref_count and page's
+                * ref_count are equal, indicating that the page has been used
+                * once for packet transmission. For non-XDP mode, internal
+                * ref_count is always '1'.
                 */
-               if (rbdr->is_xdp && (ref_count == pgcache->ref_count))
-                       pgcache->ref_count--;
-               else
-                       page = NULL;
-
-               /* In non-XDP mode, page's ref_count needs to be '1' for it
-                * to be recycled.
-                */
-               if (!rbdr->is_xdp && (ref_count != 1))
+               if (rbdr->is_xdp) {
+                       if (ref_count == pgcache->ref_count)
+                               pgcache->ref_count--;
+                       else
+                               page = NULL;
+               } else if (ref_count != 1) {
                        page = NULL;
+               }
        }
 
        if (!page) {
@@ -365,11 +364,10 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
        while (head < rbdr->pgcnt) {
                pgcache = &rbdr->pgcache[head];
                if (pgcache->page && page_ref_count(pgcache->page) != 0) {
-                       if (!rbdr->is_xdp) {
-                               put_page(pgcache->page);
-                               continue;
+                       if (rbdr->is_xdp) {
+                               page_ref_sub(pgcache->page,
+                                            pgcache->ref_count - 1);
                        }
-                       page_ref_sub(pgcache->page, pgcache->ref_count - 1);
                        put_page(pgcache->page);
                }
                head++;
index 673c57b8023fe3e2113cc1121f15d6700d12faa1..81c281ada63b32bab35fa1da242852c0bb83135f 100644 (file)
@@ -962,13 +962,13 @@ static void bgx_poll_for_sgmii_link(struct lmac *lmac)
        lmac->last_duplex = (an_result >> 1) & 0x1;
        switch (speed) {
        case 0:
-               lmac->last_speed = 10;
+               lmac->last_speed = SPEED_10;
                break;
        case 1:
-               lmac->last_speed = 100;
+               lmac->last_speed = SPEED_100;
                break;
        case 2:
-               lmac->last_speed = 1000;
+               lmac->last_speed = SPEED_1000;
                break;
        default:
                lmac->link_up = false;
@@ -1012,10 +1012,10 @@ static void bgx_poll_for_link(struct work_struct *work)
            !(smu_link & SMU_RX_CTL_STATUS)) {
                lmac->link_up = 1;
                if (lmac->lmac_type == BGX_MODE_XLAUI)
-                       lmac->last_speed = 40000;
+                       lmac->last_speed = SPEED_40000;
                else
-                       lmac->last_speed = 10000;
-               lmac->last_duplex = 1;
+                       lmac->last_speed = SPEED_10000;
+               lmac->last_duplex = DUPLEX_FULL;
        } else {
                lmac->link_up = 0;
                lmac->last_speed = SPEED_UNKNOWN;
@@ -1105,8 +1105,8 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
                        } else {
                                /* Default to below link speed and duplex */
                                lmac->link_up = true;
-                               lmac->last_speed = 1000;
-                               lmac->last_duplex = 1;
+                               lmac->last_speed = SPEED_1000;
+                               lmac->last_duplex = DUPLEX_FULL;
                                bgx_sgmii_change_link_state(lmac);
                                return 0;
                        }
index 0e9182d3f02c665306508d7b11278fb457292ede..b3e4118a15e700364292aba4c343f3dc67d9e849 100644 (file)
@@ -443,9 +443,9 @@ void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh)
 struct l2t_data *t3_init_l2t(unsigned int l2t_capacity)
 {
        struct l2t_data *d;
-       int i, size = sizeof(*d) + l2t_capacity * sizeof(struct l2t_entry);
+       int i;
 
-       d = kvzalloc(size, GFP_KERNEL);
+       d = kvzalloc(struct_size(d, l2tab, l2t_capacity), GFP_KERNEL);
        if (!d)
                return NULL;
 
index c2fd323c4078284ac84aa58825cc84958f108fe8..ea75f275023ffad95a4e2dfa46b1db51935417d8 100644 (file)
@@ -75,8 +75,8 @@ struct l2t_data {
        struct l2t_entry *rover;        /* starting point for next allocation */
        atomic_t nfree;         /* number of free entries */
        rwlock_t lock;
-       struct l2t_entry l2tab[0];
        struct rcu_head rcu_head;       /* to handle rcu cleanup */
+       struct l2t_entry l2tab[];
 };
 
 typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
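The l2t_data change above swaps a GNU zero-length array for a C99 flexible array member, which must be the last member of the struct; that is why rcu_head moves ahead of l2tab (placed after l2tab[0], it occupied memory the array's elements would overwrite). The struct_size() helper from <linux/overflow.h> computes sizeof(*d) + n * sizeof(element) with overflow checking. The idiom, sketched with a hypothetical entry type:

struct tbl {
        unsigned int nelem;
        struct rcu_head rcu;            /* fixed-size members first */
        struct my_entry slots[];        /* flexible array member last */
};

struct tbl *t = kvzalloc(struct_size(t, slots, n), GFP_KERNEL); /* n entries */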
index 5afb430000499ba55cad2da1f7a5619ff823d9be..93ad4bee3401e9d58ff75add621ff4b1ccb72087 100644 (file)
@@ -1722,12 +1722,13 @@ void hash_filter_rpl(struct adapter *adap, const struct cpl_act_open_rpl *rpl)
                break;
 
        default:
-               dev_err(adap->pdev_dev, "%s: filter creation PROBLEM; status = %u\n",
-                       __func__, status);
+               if (status != CPL_ERR_TCAM_FULL)
+                       dev_err(adap->pdev_dev, "%s: filter creation PROBLEM; status = %u\n",
+                               __func__, status);
 
                if (ctx) {
                        if (status == CPL_ERR_TCAM_FULL)
-                               ctx->result = -EAGAIN;
+                               ctx->result = -ENOSPC;
                        else
                                ctx->result = -EINVAL;
                }
index 82a8d19700602e6922765d647030a69d971de5ae..6e2d80008a79497bcc8b1bcb9206220d6308019c 100644 (file)
@@ -687,11 +687,8 @@ int cxgb4_tc_flower_replace(struct net_device *dev,
 
        ret = ctx.result;
        /* Check if hw returned error for filter creation */
-       if (ret) {
-               netdev_err(dev, "%s: filter creation err %d\n",
-                          __func__, ret);
+       if (ret)
                goto free_entry;
-       }
 
        ch_flower->tc_flower_cookie = cls->cookie;
        ch_flower->filter_id = ctx.tid;
index 74849be5f004f59552892cf642a9b02efb393ac7..e2919005ead3e1592999b140841b0234344e87b7 100644 (file)
@@ -354,7 +354,10 @@ static struct cxgbi_ppm_pool *ppm_alloc_cpu_pool(unsigned int *total,
                ppmax = max;
 
        /* pool size must be multiple of unsigned long */
-       bmap = BITS_TO_LONGS(ppmax);
+       bmap = ppmax / BITS_PER_TYPE(unsigned long);
+       if (!bmap)
+               return NULL;
+
        ppmax = (bmap * sizeof(unsigned long)) << 3;
 
        alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap;
@@ -402,6 +405,10 @@ int cxgbi_ppm_init(void **ppm_pp, struct net_device *ndev,
        if (reserve_factor) {
                ppmax_pool = ppmax / reserve_factor;
                pool = ppm_alloc_cpu_pool(&ppmax_pool, &pool_index_max);
+               if (!pool) {
+                       ppmax_pool = 0;
+                       reserve_factor = 0;
+               }
 
                pr_debug("%s: ppmax %u, cpu total %u, per cpu %u.\n",
                         ndev->name, ppmax, ppmax_pool, pool_index_max);
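
The first cxgbi hunk replaces BITS_TO_LONGS(), which rounds up to whole longs, with a plain division that rounds down, then fails the allocation when not even one whole long fits; the old rounding silently grew ppmax past the caller's limit. A small demonstration of the two roundings (macros inlined from their usual kernel definitions; the numbers in the comments assume 64-bit longs):

#include <stdio.h>

#define BITS_PER_TYPE(type)     (sizeof(type) * 8)
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
#define BITS_TO_LONGS(nr)       DIV_ROUND_UP(nr, BITS_PER_TYPE(long))

int main(void)
{
        unsigned int ppmax = 100;       /* requested pool entries */

        /* old behaviour: 100 bits round up to 2 longs = 128 bits,
         * silently exceeding the requested maximum */
        size_t up = BITS_TO_LONGS(ppmax);

        /* new behaviour: 100 bits round down to 1 long = 64 bits,
         * and anything under 64 yields 0, which the caller rejects */
        size_t down = ppmax / BITS_PER_TYPE(unsigned long);

        printf("round up:   %zu long(s) -> %zu bits\n", up, up * BITS_PER_TYPE(long));
        printf("round down: %zu long(s) -> %zu bits\n", down, down * BITS_PER_TYPE(long));
        return 0;
}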
index 733d9172425bf46398714c8a003c99fdfbd91f67..acb2856936d2027babd1c06b02896a91b4d3d2e5 100644 (file)
@@ -897,7 +897,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
        if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
                netif_tx_stop_queue(txq);
        skb_tx_timestamp(skb);
-       if (!skb->xmit_more || netif_xmit_stopped(txq))
+       if (!netdev_xmit_more() || netif_xmit_stopped(txq))
                vnic_wq_doorbell(wq);
 
        spin_unlock(&enic->wq_lock[txq_map]);
index 3c7c04406a2bfa55d1f3c67e0019d488c7dff4a8..e2f9fbced17457b481a3e9ac2f1a9bc358a66b59 100644 (file)
@@ -1376,7 +1376,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
        u16 q_idx = skb_get_queue_mapping(skb);
        struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
        struct be_wrb_params wrb_params = { 0 };
-       bool flush = !skb->xmit_more;
+       bool flush = !netdev_xmit_more();
        u16 wrb_cnt;
 
        skb = be_xmit_workarounds(adapter, skb, &wrb_params);
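
The enic and be2net hunks are part of the tree-wide switch from the skb->xmit_more flag to netdev_xmit_more(), which moves the "another packet is coming" hint out of the skb and into per-CPU softnet state. The doorbell-batching logic itself is unchanged; a hedged sketch of the idea (the helpers below are illustrative stand-ins, not driver API):

#include <stdbool.h>
#include <stdio.h>

static int pending;                     /* descriptors posted, not yet doorbelled */

static void ring_doorbell(void)
{
        printf("doorbell after %d packet(s)\n", pending);
        pending = 0;
}

/* xmit_more: the stack's hint that another packet follows immediately */
static void xmit_one(bool xmit_more, bool queue_stopped)
{
        pending++;                      /* post the descriptor */
        /* batch doorbells: only kick the NIC on the last packet of a
         * burst, or when the queue stopped and nothing more will come */
        if (!xmit_more || queue_stopped)
                ring_doorbell();
}

int main(void)
{
        xmit_one(true, false);          /* burst of 3: no doorbell yet */
        xmit_one(true, false);
        xmit_one(false, false);         /* last of burst: one doorbell */
        return 0;
}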
index 2055c97dc22bea5fa51aac41169d39c1d575ccb0..63b1ecc18c26f7833988394835ee482a844ff6b8 100644 (file)
@@ -2571,10 +2571,12 @@ static const struct dpaa2_eth_dist_fields dist_fields[] = {
                .rxnfc_field = RXH_L2DA,
                .cls_prot = NET_PROT_ETH,
                .cls_field = NH_FLD_ETH_DA,
+               .id = DPAA2_ETH_DIST_ETHDST,
                .size = 6,
        }, {
                .cls_prot = NET_PROT_ETH,
                .cls_field = NH_FLD_ETH_SA,
+               .id = DPAA2_ETH_DIST_ETHSRC,
                .size = 6,
        }, {
                /* This is the last ethertype field parsed:
@@ -2583,28 +2585,33 @@ static const struct dpaa2_eth_dist_fields dist_fields[] = {
                 */
                .cls_prot = NET_PROT_ETH,
                .cls_field = NH_FLD_ETH_TYPE,
+               .id = DPAA2_ETH_DIST_ETHTYPE,
                .size = 2,
        }, {
                /* VLAN header */
                .rxnfc_field = RXH_VLAN,
                .cls_prot = NET_PROT_VLAN,
                .cls_field = NH_FLD_VLAN_TCI,
+               .id = DPAA2_ETH_DIST_VLAN,
                .size = 2,
        }, {
                /* IP header */
                .rxnfc_field = RXH_IP_SRC,
                .cls_prot = NET_PROT_IP,
                .cls_field = NH_FLD_IP_SRC,
+               .id = DPAA2_ETH_DIST_IPSRC,
                .size = 4,
        }, {
                .rxnfc_field = RXH_IP_DST,
                .cls_prot = NET_PROT_IP,
                .cls_field = NH_FLD_IP_DST,
+               .id = DPAA2_ETH_DIST_IPDST,
                .size = 4,
        }, {
                .rxnfc_field = RXH_L3_PROTO,
                .cls_prot = NET_PROT_IP,
                .cls_field = NH_FLD_IP_PROTO,
+               .id = DPAA2_ETH_DIST_IPPROTO,
                .size = 1,
        }, {
                /* Using UDP ports, this is functionally equivalent to raw
@@ -2613,11 +2620,13 @@ static const struct dpaa2_eth_dist_fields dist_fields[] = {
                .rxnfc_field = RXH_L4_B_0_1,
                .cls_prot = NET_PROT_UDP,
                .cls_field = NH_FLD_UDP_PORT_SRC,
+               .id = DPAA2_ETH_DIST_L4SRC,
                .size = 2,
        }, {
                .rxnfc_field = RXH_L4_B_2_3,
                .cls_prot = NET_PROT_UDP,
                .cls_field = NH_FLD_UDP_PORT_DST,
+               .id = DPAA2_ETH_DIST_L4DST,
                .size = 2,
        },
 };
@@ -2683,12 +2692,15 @@ static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
 }
 
 /* Size of the Rx flow classification key */
-int dpaa2_eth_cls_key_size(void)
+int dpaa2_eth_cls_key_size(u64 fields)
 {
        int i, size = 0;
 
-       for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
+       for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
+               if (!(fields & dist_fields[i].id))
+                       continue;
                size += dist_fields[i].size;
+       }
 
        return size;
 }
@@ -2709,6 +2721,24 @@ int dpaa2_eth_cls_fld_off(int prot, int field)
        return 0;
 }
 
+/* Prune unused fields from the classification rule.
+ * Used when masking is not supported
+ */
+void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields)
+{
+       int off = 0, new_off = 0;
+       int i, size;
+
+       for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
+               size = dist_fields[i].size;
+               if (dist_fields[i].id & fields) {
+                       memcpy(key_mem + new_off, key_mem + off, size);
+                       new_off += size;
+               }
+               off += size;
+       }
+}
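
dpaa2_eth_cls_trim_rule() compacts the maximal key buffer in place: it walks the field table in key order and slides each kept field toward the front, so the trimmed key preserves the hardware's expected field ordering. A standalone sketch with a toy field table (names and sizes are illustrative; memmove is the cautious choice here, since a kept field can slide into space it partly overlaps):

#include <stdio.h>
#include <string.h>

struct field { unsigned long id; int size; };

/* toy field table, in fixed key order */
static const struct field fields[] = {
        { 1UL << 0, 6 },        /* eth dst   */
        { 1UL << 1, 6 },        /* eth src   */
        { 1UL << 2, 2 },        /* ethertype */
        { 1UL << 3, 4 },        /* ip src    */
};

static int trim_rule(unsigned char *key, unsigned long keep)
{
        int off = 0, new_off = 0;
        size_t i;

        for (i = 0; i < sizeof(fields) / sizeof(fields[0]); i++) {
                if (fields[i].id & keep) {
                        /* slide kept field left; new_off <= off always */
                        memmove(key + new_off, key + off, fields[i].size);
                        new_off += fields[i].size;
                }
                off += fields[i].size;
        }
        return new_off;         /* trimmed key size */
}

int main(void)
{
        unsigned char key[18] = "AAAAAABBBBBBTTIIII";    /* 6+6+2+4 bytes */
        int n = trim_rule(key, (1UL << 2) | (1UL << 3)); /* keep type + ipsrc */

        printf("trimmed size %d, key now \"%.*s\"\n", n, n, (char *)key);
        return 0;
}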
+
 /* Set Rx distribution (hash or flow classification) key
  * flags is a combination of RXH_ bits
  */
@@ -2730,14 +2760,13 @@ static int dpaa2_eth_set_dist_key(struct net_device *net_dev,
                struct dpkg_extract *key =
                        &cls_cfg.extracts[cls_cfg.num_extracts];
 
-               /* For Rx hashing key we set only the selected fields.
-                * For Rx flow classification key we set all supported fields
+               /* For both Rx hashing and classification keys
+                * we set only the selected fields.
                 */
-               if (type == DPAA2_ETH_RX_DIST_HASH) {
-                       if (!(flags & dist_fields[i].rxnfc_field))
-                               continue;
+               if (!(flags & dist_fields[i].id))
+                       continue;
+               if (type == DPAA2_ETH_RX_DIST_HASH)
                        rx_hash_fields |= dist_fields[i].rxnfc_field;
-               }
 
                if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
                        dev_err(dev, "error adding key extraction rule, too many rules?\n");
@@ -2792,16 +2821,28 @@ static int dpaa2_eth_set_dist_key(struct net_device *net_dev,
 int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
 {
        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+       u64 key = 0;
+       int i;
 
        if (!dpaa2_eth_hash_enabled(priv))
                return -EOPNOTSUPP;
 
-       return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, flags);
+       for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
+               if (dist_fields[i].rxnfc_field & flags)
+                       key |= dist_fields[i].id;
+
+       return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, key);
 }
 
-static int dpaa2_eth_set_cls(struct dpaa2_eth_priv *priv)
+int dpaa2_eth_set_cls(struct net_device *net_dev, u64 flags)
+{
+       return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_CLS, flags);
+}
+
+static int dpaa2_eth_set_default_cls(struct dpaa2_eth_priv *priv)
 {
        struct device *dev = priv->net_dev->dev.parent;
+       int err;
 
        /* Check if we actually support Rx flow classification */
        if (dpaa2_eth_has_legacy_dist(priv)) {
@@ -2809,8 +2850,7 @@ static int dpaa2_eth_set_cls(struct dpaa2_eth_priv *priv)
                return -EOPNOTSUPP;
        }
 
-       if (priv->dpni_attrs.options & DPNI_OPT_NO_FS ||
-           !(priv->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING)) {
+       if (!dpaa2_eth_fs_enabled(priv)) {
                dev_dbg(dev, "Rx cls disabled in DPNI options\n");
                return -EOPNOTSUPP;
        }
@@ -2820,9 +2860,21 @@ static int dpaa2_eth_set_cls(struct dpaa2_eth_priv *priv)
                return -EOPNOTSUPP;
        }
 
+       /* If there is no support for masking in the classification table,
+        * we don't set a default key, as it will depend on the rules
+        * added by the user at runtime.
+        */
+       if (!dpaa2_eth_fs_mask_enabled(priv))
+               goto out;
+
+       err = dpaa2_eth_set_cls(priv->net_dev, DPAA2_ETH_DIST_ALL);
+       if (err)
+               return err;
+
+out:
        priv->rx_cls_enabled = 1;
 
-       return dpaa2_eth_set_dist_key(priv->net_dev, DPAA2_ETH_RX_DIST_CLS, 0);
+       return 0;
 }
 
 /* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
@@ -2857,7 +2909,7 @@ static int bind_dpni(struct dpaa2_eth_priv *priv)
        /* Configure the flow classification key; it includes all
         * supported header fields and cannot be modified at runtime
         */
-       err = dpaa2_eth_set_cls(priv);
+       err = dpaa2_eth_set_default_cls(priv);
        if (err && err != -EOPNOTSUPP)
                dev_err(dev, "Failed to configure Rx classification key\n");
 
index a11ebfdc4a2329e169bd6a7978aa5b1c4413d597..5fb8f5c0dc9f8572bc7145706d29cdfba74e001f 100644 (file)
@@ -342,6 +342,7 @@ struct dpaa2_eth_dist_fields {
        enum net_prot cls_prot;
        int cls_field;
        int size;
+       u64 id;
 };
 
 struct dpaa2_eth_cls_rule {
@@ -394,6 +395,7 @@ struct dpaa2_eth_priv {
 
        /* enabled ethtool hashing bits */
        u64 rx_hash_fields;
+       u64 rx_cls_fields;
        struct dpaa2_eth_cls_rule *cls_rules;
        u8 rx_cls_enabled;
        struct bpf_prog *xdp_prog;
@@ -437,6 +439,12 @@ static inline int dpaa2_eth_cmp_dpni_ver(struct dpaa2_eth_priv *priv,
        (dpaa2_eth_cmp_dpni_ver((priv), DPNI_RX_DIST_KEY_VER_MAJOR,     \
                                DPNI_RX_DIST_KEY_VER_MINOR) < 0)
 
+#define dpaa2_eth_fs_enabled(priv)     \
+       (!((priv)->dpni_attrs.options & DPNI_OPT_NO_FS))
+
+#define dpaa2_eth_fs_mask_enabled(priv)        \
+       ((priv)->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING)
+
 #define dpaa2_eth_fs_count(priv)        \
        ((priv)->dpni_attrs.fs_entries)
 
@@ -449,6 +457,18 @@ enum dpaa2_eth_rx_dist {
        DPAA2_ETH_RX_DIST_CLS
 };
 
+/* Unique IDs for the supported Rx classification header fields */
+#define DPAA2_ETH_DIST_ETHDST          BIT(0)
+#define DPAA2_ETH_DIST_ETHSRC          BIT(1)
+#define DPAA2_ETH_DIST_ETHTYPE         BIT(2)
+#define DPAA2_ETH_DIST_VLAN            BIT(3)
+#define DPAA2_ETH_DIST_IPSRC           BIT(4)
+#define DPAA2_ETH_DIST_IPDST           BIT(5)
+#define DPAA2_ETH_DIST_IPPROTO         BIT(6)
+#define DPAA2_ETH_DIST_L4SRC           BIT(7)
+#define DPAA2_ETH_DIST_L4DST           BIT(8)
+#define DPAA2_ETH_DIST_ALL             (~0U)
+
 static inline
 unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv,
                                       struct sk_buff *skb)
@@ -483,7 +503,9 @@ static inline unsigned int dpaa2_eth_rx_head_room(struct dpaa2_eth_priv *priv)
 }
 
 int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags);
-int dpaa2_eth_cls_key_size(void);
+int dpaa2_eth_set_cls(struct net_device *net_dev, u64 key);
+int dpaa2_eth_cls_key_size(u64 key);
 int dpaa2_eth_cls_fld_off(int prot, int field);
+void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields);
 
 #endif /* __DPAA2_H */
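
The new DPAA2_ETH_DIST_* IDs give every supported header field a stable bit, so a hash or classification key is just an OR of field bits, as dpaa2_eth_set_hash() builds from the ethtool RXH_* flags. A self-contained sketch of that translation loop (the RXH_* values below are toy stand-ins, not the real ethtool constants):

#include <stdio.h>

#define BIT(n)  (1ULL << (n))

/* driver-private field IDs (mirroring the defines above) */
#define DIST_IPSRC      BIT(4)
#define DIST_IPDST      BIT(5)
#define DIST_L4SRC      BIT(7)

/* toy stand-ins for the ethtool RXH_* hash flags */
#define RXH_IP_SRC      (1 << 0)
#define RXH_IP_DST      (1 << 1)
#define RXH_L4_B_0_1    (1 << 2)

struct map { unsigned int rxnfc; unsigned long long id; };

static const struct map dist_map[] = {
        { RXH_IP_SRC,   DIST_IPSRC },
        { RXH_IP_DST,   DIST_IPDST },
        { RXH_L4_B_0_1, DIST_L4SRC },
};

int main(void)
{
        unsigned int flags = RXH_IP_SRC | RXH_L4_B_0_1; /* user request */
        unsigned long long key = 0;
        size_t i;

        /* same shape as the loop in dpaa2_eth_set_hash(): ethtool flag
         * bits in, driver field-ID bits out */
        for (i = 0; i < sizeof(dist_map) / sizeof(dist_map[0]); i++)
                if (dist_map[i].rxnfc & flags)
                        key |= dist_map[i].id;

        printf("field key: %#llx\n", key);      /* 0x90 = IPSRC | L4SRC */
        return 0;
}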
index 591dfcf76adbced88e4fd11e4ddcfcd532c9a46e..76bd8d2872cc8e2899e73a71d4f99822d60ed23a 100644 (file)
@@ -264,7 +264,7 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
 }
 
 static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
-                        void *key, void *mask)
+                        void *key, void *mask, u64 *fields)
 {
        int off;
 
@@ -272,18 +272,21 @@ static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
                off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
                *(__be16 *)(key + off) = eth_value->h_proto;
                *(__be16 *)(mask + off) = eth_mask->h_proto;
+               *fields |= DPAA2_ETH_DIST_ETHTYPE;
        }
 
        if (!is_zero_ether_addr(eth_mask->h_source)) {
                off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_SA);
                ether_addr_copy(key + off, eth_value->h_source);
                ether_addr_copy(mask + off, eth_mask->h_source);
+               *fields |= DPAA2_ETH_DIST_ETHSRC;
        }
 
        if (!is_zero_ether_addr(eth_mask->h_dest)) {
                off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
                ether_addr_copy(key + off, eth_value->h_dest);
                ether_addr_copy(mask + off, eth_mask->h_dest);
+               *fields |= DPAA2_ETH_DIST_ETHDST;
        }
 
        return 0;
@@ -291,7 +294,7 @@ static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
 
 static int prep_uip_rule(struct ethtool_usrip4_spec *uip_value,
                         struct ethtool_usrip4_spec *uip_mask,
-                        void *key, void *mask)
+                        void *key, void *mask, u64 *fields)
 {
        int off;
        u32 tmp_value, tmp_mask;
@@ -303,18 +306,21 @@ static int prep_uip_rule(struct ethtool_usrip4_spec *uip_value,
                off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
                *(__be32 *)(key + off) = uip_value->ip4src;
                *(__be32 *)(mask + off) = uip_mask->ip4src;
+               *fields |= DPAA2_ETH_DIST_IPSRC;
        }
 
        if (uip_mask->ip4dst) {
                off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
                *(__be32 *)(key + off) = uip_value->ip4dst;
                *(__be32 *)(mask + off) = uip_mask->ip4dst;
+               *fields |= DPAA2_ETH_DIST_IPDST;
        }
 
        if (uip_mask->proto) {
                off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
                *(u8 *)(key + off) = uip_value->proto;
                *(u8 *)(mask + off) = uip_mask->proto;
+               *fields |= DPAA2_ETH_DIST_IPPROTO;
        }
 
        if (uip_mask->l4_4_bytes) {
@@ -324,23 +330,26 @@ static int prep_uip_rule(struct ethtool_usrip4_spec *uip_value,
                off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
                *(__be16 *)(key + off) = htons(tmp_value >> 16);
                *(__be16 *)(mask + off) = htons(tmp_mask >> 16);
+               *fields |= DPAA2_ETH_DIST_L4SRC;
 
                off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
                *(__be16 *)(key + off) = htons(tmp_value & 0xFFFF);
                *(__be16 *)(mask + off) = htons(tmp_mask & 0xFFFF);
+               *fields |= DPAA2_ETH_DIST_L4DST;
        }
 
        /* Only apply the rule for IPv4 frames */
        off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
        *(__be16 *)(key + off) = htons(ETH_P_IP);
        *(__be16 *)(mask + off) = htons(0xFFFF);
+       *fields |= DPAA2_ETH_DIST_ETHTYPE;
 
        return 0;
 }
 
 static int prep_l4_rule(struct ethtool_tcpip4_spec *l4_value,
                        struct ethtool_tcpip4_spec *l4_mask,
-                       void *key, void *mask, u8 l4_proto)
+                       void *key, void *mask, u8 l4_proto, u64 *fields)
 {
        int off;
 
@@ -351,41 +360,47 @@ static int prep_l4_rule(struct ethtool_tcpip4_spec *l4_value,
                off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
                *(__be32 *)(key + off) = l4_value->ip4src;
                *(__be32 *)(mask + off) = l4_mask->ip4src;
+               *fields |= DPAA2_ETH_DIST_IPSRC;
        }
 
        if (l4_mask->ip4dst) {
                off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
                *(__be32 *)(key + off) = l4_value->ip4dst;
                *(__be32 *)(mask + off) = l4_mask->ip4dst;
+               *fields |= DPAA2_ETH_DIST_IPDST;
        }
 
        if (l4_mask->psrc) {
                off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
                *(__be16 *)(key + off) = l4_value->psrc;
                *(__be16 *)(mask + off) = l4_mask->psrc;
+               *fields |= DPAA2_ETH_DIST_L4SRC;
        }
 
        if (l4_mask->pdst) {
                off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
                *(__be16 *)(key + off) = l4_value->pdst;
                *(__be16 *)(mask + off) = l4_mask->pdst;
+               *fields |= DPAA2_ETH_DIST_L4DST;
        }
 
        /* Only apply the rule for IPv4 frames with the specified L4 proto */
        off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
        *(__be16 *)(key + off) = htons(ETH_P_IP);
        *(__be16 *)(mask + off) = htons(0xFFFF);
+       *fields |= DPAA2_ETH_DIST_ETHTYPE;
 
        off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
        *(u8 *)(key + off) = l4_proto;
        *(u8 *)(mask + off) = 0xFF;
+       *fields |= DPAA2_ETH_DIST_IPPROTO;
 
        return 0;
 }
 
 static int prep_ext_rule(struct ethtool_flow_ext *ext_value,
                         struct ethtool_flow_ext *ext_mask,
-                        void *key, void *mask)
+                        void *key, void *mask, u64 *fields)
 {
        int off;
 
@@ -396,6 +411,7 @@ static int prep_ext_rule(struct ethtool_flow_ext *ext_value,
                off = dpaa2_eth_cls_fld_off(NET_PROT_VLAN, NH_FLD_VLAN_TCI);
                *(__be16 *)(key + off) = ext_value->vlan_tci;
                *(__be16 *)(mask + off) = ext_mask->vlan_tci;
+               *fields |= DPAA2_ETH_DIST_VLAN;
        }
 
        return 0;
@@ -403,7 +419,7 @@ static int prep_ext_rule(struct ethtool_flow_ext *ext_value,
 
 static int prep_mac_ext_rule(struct ethtool_flow_ext *ext_value,
                             struct ethtool_flow_ext *ext_mask,
-                            void *key, void *mask)
+                            void *key, void *mask, u64 *fields)
 {
        int off;
 
@@ -411,36 +427,38 @@ static int prep_mac_ext_rule(struct ethtool_flow_ext *ext_value,
                off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
                ether_addr_copy(key + off, ext_value->h_dest);
                ether_addr_copy(mask + off, ext_mask->h_dest);
+               *fields |= DPAA2_ETH_DIST_ETHDST;
        }
 
        return 0;
 }
 
-static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask)
+static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask,
+                        u64 *fields)
 {
        int err;
 
        switch (fs->flow_type & 0xFF) {
        case ETHER_FLOW:
                err = prep_eth_rule(&fs->h_u.ether_spec, &fs->m_u.ether_spec,
-                                   key, mask);
+                                   key, mask, fields);
                break;
        case IP_USER_FLOW:
                err = prep_uip_rule(&fs->h_u.usr_ip4_spec,
-                                   &fs->m_u.usr_ip4_spec, key, mask);
+                                   &fs->m_u.usr_ip4_spec, key, mask, fields);
                break;
        case TCP_V4_FLOW:
                err = prep_l4_rule(&fs->h_u.tcp_ip4_spec, &fs->m_u.tcp_ip4_spec,
-                                  key, mask, IPPROTO_TCP);
+                                  key, mask, IPPROTO_TCP, fields);
                break;
        case UDP_V4_FLOW:
                err = prep_l4_rule(&fs->h_u.udp_ip4_spec, &fs->m_u.udp_ip4_spec,
-                                  key, mask, IPPROTO_UDP);
+                                  key, mask, IPPROTO_UDP, fields);
                break;
        case SCTP_V4_FLOW:
                err = prep_l4_rule(&fs->h_u.sctp_ip4_spec,
                                   &fs->m_u.sctp_ip4_spec, key, mask,
-                                  IPPROTO_SCTP);
+                                  IPPROTO_SCTP, fields);
                break;
        default:
                return -EOPNOTSUPP;
@@ -450,13 +468,14 @@ static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask)
                return err;
 
        if (fs->flow_type & FLOW_EXT) {
-               err = prep_ext_rule(&fs->h_ext, &fs->m_ext, key, mask);
+               err = prep_ext_rule(&fs->h_ext, &fs->m_ext, key, mask, fields);
                if (err)
                        return err;
        }
 
        if (fs->flow_type & FLOW_MAC_EXT) {
-               err = prep_mac_ext_rule(&fs->h_ext, &fs->m_ext, key, mask);
+               err = prep_mac_ext_rule(&fs->h_ext, &fs->m_ext, key, mask,
+                                       fields);
                if (err)
                        return err;
        }
@@ -473,6 +492,7 @@ static int do_cls_rule(struct net_device *net_dev,
        struct dpni_rule_cfg rule_cfg = { 0 };
        struct dpni_fs_action_cfg fs_act = { 0 };
        dma_addr_t key_iova;
+       u64 fields = 0;
        void *key_buf;
        int err;
 
@@ -480,7 +500,7 @@ static int do_cls_rule(struct net_device *net_dev,
            fs->ring_cookie >= dpaa2_eth_queue_count(priv))
                return -EINVAL;
 
-       rule_cfg.key_size = dpaa2_eth_cls_key_size();
+       rule_cfg.key_size = dpaa2_eth_cls_key_size(DPAA2_ETH_DIST_ALL);
 
        /* allocate twice the key size, for the actual key and for mask */
        key_buf = kzalloc(rule_cfg.key_size * 2, GFP_KERNEL);
@@ -488,10 +508,36 @@ static int do_cls_rule(struct net_device *net_dev,
                return -ENOMEM;
 
        /* Fill the key and mask memory areas */
-       err = prep_cls_rule(fs, key_buf, key_buf + rule_cfg.key_size);
+       err = prep_cls_rule(fs, key_buf, key_buf + rule_cfg.key_size, &fields);
        if (err)
                goto free_mem;
 
+       if (!dpaa2_eth_fs_mask_enabled(priv)) {
+               /* Masking allows us to configure a maximal key during init and
+                * use it for all flow steering rules. Without it, we include in
+                * the key only the fields actually used, so we must strip the
+                * others out of the final key buffer.
+                *
+                * Program the FS key if needed, or return an error if the
+                * previously set key can't be used for the current rule. The
+                * user must delete the existing rules first to make room for
+                * the new one.
+                */
+               if (!priv->rx_cls_fields) {
+                       err = dpaa2_eth_set_cls(net_dev, fields);
+                       if (err)
+                               goto free_mem;
+
+                       priv->rx_cls_fields = fields;
+               } else if (priv->rx_cls_fields != fields) {
+                       netdev_err(net_dev, "No support for multiple FS keys, need to delete existing rules\n");
+                       err = -EOPNOTSUPP;
+                       goto free_mem;
+               }
+
+               dpaa2_eth_cls_trim_rule(key_buf, fields);
+               rule_cfg.key_size = dpaa2_eth_cls_key_size(fields);
+       }
+
        key_iova = dma_map_single(dev, key_buf, rule_cfg.key_size * 2,
                                  DMA_TO_DEVICE);
        if (dma_mapping_error(dev, key_iova)) {
@@ -500,7 +546,8 @@ static int do_cls_rule(struct net_device *net_dev,
        }
 
        rule_cfg.key_iova = key_iova;
-       rule_cfg.mask_iova = key_iova + rule_cfg.key_size;
+       if (dpaa2_eth_fs_mask_enabled(priv))
+               rule_cfg.mask_iova = key_iova + rule_cfg.key_size;
 
        if (add) {
                if (fs->ring_cookie == RX_CLS_FLOW_DISC)
@@ -522,6 +569,17 @@ static int do_cls_rule(struct net_device *net_dev,
        return err;
 }
 
+static int num_rules(struct dpaa2_eth_priv *priv)
+{
+       int i, rules = 0;
+
+       for (i = 0; i < dpaa2_eth_fs_count(priv); i++)
+               if (priv->cls_rules[i].in_use)
+                       rules++;
+
+       return rules;
+}
+
 static int update_cls_rule(struct net_device *net_dev,
                           struct ethtool_rx_flow_spec *new_fs,
                           int location)
@@ -545,6 +603,9 @@ static int update_cls_rule(struct net_device *net_dev,
                        return err;
 
                rule->in_use = 0;
+
+               if (!dpaa2_eth_fs_mask_enabled(priv) && !num_rules(priv))
+                       priv->rx_cls_fields = 0;
        }
 
        /* If no new entry to add, return here */
@@ -581,9 +642,7 @@ static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
                break;
        case ETHTOOL_GRXCLSRLCNT:
                rxnfc->rule_cnt = 0;
-               for (i = 0; i < max_rules; i++)
-                       if (priv->cls_rules[i].in_use)
-                               rxnfc->rule_cnt++;
+               rxnfc->rule_cnt = num_rules(priv);
                rxnfc->data = max_rules;
                break;
        case ETHTOOL_GRXCLSRULE:
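
Without key masking the hardware can only match one key layout at a time, so do_cls_rule() latches the field set of the first rule into priv->rx_cls_fields and rejects rules needing a different layout, while update_cls_rule() clears the latch once the last rule is deleted. A compact sketch of that latch (state and return codes are illustrative):

#include <stdio.h>

static unsigned long long active_fields;        /* 0 = no key programmed */
static int rule_count;

/* returns 0 on success, -1 if the new rule needs a different key */
static int add_rule(unsigned long long fields)
{
        if (!active_fields)
                active_fields = fields;         /* first rule sets the key */
        else if (active_fields != fields)
                return -1;                      /* incompatible layout */
        rule_count++;
        return 0;
}

static void del_rule(void)
{
        if (rule_count && --rule_count == 0)
                active_fields = 0;              /* key free for reuse */
}

int main(void)
{
        printf("rule A: %d\n", add_rule(0x3));  /* ok, programs key 0x3 */
        printf("rule B: %d\n", add_rule(0x5));  /* rejected: key mismatch */
        del_rule();                             /* last rule gone */
        printf("rule C: %d\n", add_rule(0x5));  /* ok again */
        return 0;
}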
index 697c2427f2b70c06c87dd00ae23d4e0b06d4fc3d..a96ad20ee4843e9cdd02c55a3e7286a51679efea 100644 (file)
@@ -1840,13 +1840,9 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
        int ret;
 
        if (enable) {
-               ret = clk_prepare_enable(fep->clk_ahb);
-               if (ret)
-                       return ret;
-
                ret = clk_prepare_enable(fep->clk_enet_out);
                if (ret)
-                       goto failed_clk_enet_out;
+                       return ret;
 
                if (fep->clk_ptp) {
                        mutex_lock(&fep->ptp_clk_mutex);
@@ -1866,7 +1862,6 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
 
                phy_reset_after_clk_enable(ndev->phydev);
        } else {
-               clk_disable_unprepare(fep->clk_ahb);
                clk_disable_unprepare(fep->clk_enet_out);
                if (fep->clk_ptp) {
                        mutex_lock(&fep->ptp_clk_mutex);
@@ -1885,8 +1880,6 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
 failed_clk_ptp:
        if (fep->clk_enet_out)
                clk_disable_unprepare(fep->clk_enet_out);
-failed_clk_enet_out:
-               clk_disable_unprepare(fep->clk_ahb);
 
        return ret;
 }
@@ -3470,6 +3463,9 @@ fec_probe(struct platform_device *pdev)
        ret = clk_prepare_enable(fep->clk_ipg);
        if (ret)
                goto failed_clk_ipg;
+       ret = clk_prepare_enable(fep->clk_ahb);
+       if (ret)
+               goto failed_clk_ahb;
 
        fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy");
        if (!IS_ERR(fep->reg_phy)) {
@@ -3563,6 +3559,9 @@ fec_probe(struct platform_device *pdev)
        pm_runtime_put(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
 failed_regulator:
+       clk_disable_unprepare(fep->clk_ahb);
+failed_clk_ahb:
+       clk_disable_unprepare(fep->clk_ipg);
 failed_clk_ipg:
        fec_enet_clk_enable(ndev, false);
 failed_clk:
@@ -3686,6 +3685,7 @@ static int __maybe_unused fec_runtime_suspend(struct device *dev)
        struct net_device *ndev = dev_get_drvdata(dev);
        struct fec_enet_private *fep = netdev_priv(ndev);
 
+       clk_disable_unprepare(fep->clk_ahb);
        clk_disable_unprepare(fep->clk_ipg);
 
        return 0;
@@ -3695,8 +3695,20 @@ static int __maybe_unused fec_runtime_resume(struct device *dev)
 {
        struct net_device *ndev = dev_get_drvdata(dev);
        struct fec_enet_private *fep = netdev_priv(ndev);
+       int ret;
 
-       return clk_prepare_enable(fep->clk_ipg);
+       ret = clk_prepare_enable(fep->clk_ahb);
+       if (ret)
+               return ret;
+       ret = clk_prepare_enable(fep->clk_ipg);
+       if (ret)
+               goto failed_clk_ipg;
+
+       return 0;
+
+failed_clk_ipg:
+       clk_disable_unprepare(fep->clk_ahb);
+       return ret;
 }
 
 static const struct dev_pm_ops fec_pm_ops = {
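
The fec change moves the AHB bus clock out of fec_enet_clk_enable() and under probe and runtime PM, so it now tracks the device's power state instead of the link state; the new resume path is the usual enable-or-unwind ladder. A userspace sketch of that ladder shape (clock names and helpers are stand-ins for the clk API):

#include <stdio.h>

/* stand-ins for clk_prepare_enable()/clk_disable_unprepare() */
static int enable_clk(const char *name, int fail)
{
        if (fail) {
                printf("enable %s: failed\n", name);
                return -1;
        }
        printf("enable %s\n", name);
        return 0;
}

static void disable_clk(const char *name)
{
        printf("disable %s\n", name);
}

/* the enable-or-unwind ladder used by fec_runtime_resume() */
static int runtime_resume(int ipg_fails)
{
        int ret;

        ret = enable_clk("ahb", 0);
        if (ret)
                return ret;
        ret = enable_clk("ipg", ipg_fails);
        if (ret)
                goto failed_ipg;        /* undo in reverse order */

        return 0;

failed_ipg:
        disable_clk("ahb");
        return ret;
}

int main(void)
{
        runtime_resume(1);      /* ipg fails: ahb is rolled back */
        return 0;
}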
index 79d03f8ee7b180d2cab9a2a647254461c0a0cb08..c7fa97a7e1f4d4b07dd6b00f7c5c7bffca4a0356 100644 (file)
@@ -150,7 +150,6 @@ static int hnae_alloc_buffers(struct hnae_ring *ring)
 /* free desc along with its attached buffer */
 static void hnae_free_desc(struct hnae_ring *ring)
 {
-       hnae_free_buffers(ring);
        dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
                         ring->desc_num * sizeof(ring->desc[0]),
                         ring_to_dma_dir(ring));
@@ -183,6 +182,9 @@ static int hnae_alloc_desc(struct hnae_ring *ring)
 /* fini ring, also free the buffer for the ring */
 static void hnae_fini_ring(struct hnae_ring *ring)
 {
+       if (is_rx_ring(ring))
+               hnae_free_buffers(ring);
+
        hnae_free_desc(ring);
        kfree(ring->desc_cb);
        ring->desc_cb = NULL;
index 08a750fb60c49d397c61845130e153fe1e3b0b3e..d6fb8343723041992d12cfa505f222822bd603fd 100644 (file)
@@ -357,7 +357,7 @@ struct hnae_buf_ops {
 };
 
 struct hnae_queue {
-       void __iomem *io_base;
+       u8 __iomem *io_base;
        phys_addr_t phy_base;
        struct hnae_ae_dev *dev;        /* the device who use this queue */
        struct hnae_ring rx_ring ____cacheline_internodealigned_in_smp;
index a97228c93831d69fe2211317486f14a25197e740..6c0507921623b843bb2010321782ace35a24b8a3 100644 (file)
@@ -370,7 +370,7 @@ int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn)
 static void hns_mac_param_get(struct mac_params *param,
                              struct hns_mac_cb *mac_cb)
 {
-       param->vaddr = (void *)mac_cb->vaddr;
+       param->vaddr = mac_cb->vaddr;
        param->mac_mode = hns_get_enet_interface(mac_cb);
        ether_addr_copy(param->addr, mac_cb->addr_entry_idx[0].addr);
        param->mac_id = mac_cb->mac_id;
index fbc75341bef760b82a1d7a10469d5649db91e366..22589799f1a575127f77f733e9e21a28889efa46 100644 (file)
@@ -187,7 +187,7 @@ struct mac_statistics {
 /* MAC parameter struct; the MAC gets its params from the NIC or DSAF at initialization */
 struct mac_params {
        char addr[ETH_ALEN];
-       void *vaddr; /*virtual address*/
+       u8 __iomem *vaddr; /*virtual address*/
        struct device *dev;
        u8 mac_id;
        /**< Ethernet operation mode (MAC-PHY interface and speed) */
@@ -402,7 +402,7 @@ struct mac_driver {
        enum mac_mode mac_mode;
        u8 mac_id;
        struct hns_mac_cb *mac_cb;
-       void __iomem *io_base;
+       u8 __iomem *io_base;
        unsigned int mac_en_flg;/* do not enable the MAC twice */
        unsigned int virt_dev_num;
        struct device *dev;
index ac55db065f167ad58f9ec41966afa7b0299b5f40..e05d2095d09bf869479e01bed78f789e62135237 100644 (file)
@@ -1602,8 +1602,6 @@ static void hns_dsaf_set_mac_key(
                       DSAF_TBL_TCAM_KEY_VLAN_S, vlan_id);
        dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_PORT_M,
                       DSAF_TBL_TCAM_KEY_PORT_S, port);
-
-       mac_key->low.bits.port_vlan = le16_to_cpu(mac_key->low.bits.port_vlan);
 }
 
 /**
@@ -1663,8 +1661,8 @@ int hns_dsaf_set_mac_uc_entry(
        /* default config dvc to 0 */
        mac_data.tbl_ucast_dvc = 0;
        mac_data.tbl_ucast_out_port = mac_entry->port_num;
-       tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val);
-       tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val);
+       tcam_data.tbl_tcam_data_high = mac_key.high.val;
+       tcam_data.tbl_tcam_data_low = mac_key.low.val;
 
        hns_dsaf_tcam_uc_cfg(dsaf_dev, entry_index, &tcam_data, &mac_data);
 
@@ -1786,9 +1784,6 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
                                     0xff,
                                     mc_mask);
 
-               mask_key.high.val = le32_to_cpu(mask_key.high.val);
-               mask_key.low.val = le32_to_cpu(mask_key.low.val);
-
                pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key);
        }
 
@@ -1840,8 +1835,8 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
                dsaf_dev->ae_dev.name, mac_key.high.val,
                mac_key.low.val, entry_index);
 
-       tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val);
-       tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val);
+       tcam_data.tbl_tcam_data_high = mac_key.high.val;
+       tcam_data.tbl_tcam_data_low = mac_key.low.val;
 
        /* config mc entry with mask */
        hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, &tcam_data,
@@ -1956,9 +1951,6 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
                /* config key mask */
                hns_dsaf_set_mac_key(dsaf_dev, &mask_key, 0x00, 0xff, mc_mask);
 
-               mask_key.high.val = le32_to_cpu(mask_key.high.val);
-               mask_key.low.val = le32_to_cpu(mask_key.low.val);
-
                pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key);
        }
 
@@ -2012,8 +2004,8 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
                soft_mac_entry += entry_index;
                soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
        } else { /* not zero, just del port, update */
-               tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val);
-               tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val);
+               tcam_data.tbl_tcam_data_high = mac_key.high.val;
+               tcam_data.tbl_tcam_data_low = mac_key.low.val;
 
                hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index,
                                     &tcam_data,
@@ -2750,6 +2742,17 @@ int hns_dsaf_get_regs_count(void)
        return DSAF_DUMP_REGS_NUM;
 }
 
+static int hns_dsaf_get_port_id(u8 port)
+{
+       if (port < DSAF_SERVICE_NW_NUM)
+               return port;
+
+       if (port >= DSAF_BASE_INNER_PORT_NUM)
+               return port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
+
+       return -EINVAL;
+}
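
The new helper folds the two valid port ranges into one dense mask index and reports -EINVAL for the reserved gap between them, letting the MAC-port and pool-bitmap lookups below share a single bounds check. A standalone sketch with illustrative range constants:

#include <stdio.h>

/* illustrative stand-ins for DSAF_SERVICE_NW_NUM / DSAF_BASE_INNER_PORT_NUM */
#define SERVICE_NUM     6
#define INNER_BASE      8

/* map a port number onto a dense mask index; the gap [6, 8) is invalid */
static int get_port_id(unsigned int port)
{
        if (port < SERVICE_NUM)
                return port;                            /* 0..5 -> 0..5 */
        if (port >= INNER_BASE)
                return port - INNER_BASE + SERVICE_NUM; /* 8.. -> 6.. */
        return -1;                                      /* 6,7: reserved */
}

int main(void)
{
        unsigned int p;

        for (p = 0; p < 10; p++)
                printf("port %u -> id %d\n", p, get_port_id(p));
        return 0;
}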
+
 static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
 {
        struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 1, 0, 0, 0x80};
@@ -2766,7 +2769,7 @@ static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
        struct hns_mac_cb *mac_cb;
        u8 addr[ETH_ALEN] = {0};
        u8 port_num;
-       u16 mskid;
+       int mskid;
 
        /* promisc uses vague table match with vlanid = 0 & macaddr = 0 */
        hns_dsaf_set_mac_key(dsaf_dev, &mac_key, 0x00, port, addr);
@@ -2815,23 +2818,33 @@ static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
        memset(&temp_key, 0x0, sizeof(temp_key));
        mask_entry.addr[0] = 0x01;
        hns_dsaf_set_mac_key(dsaf_dev, &mask_key, mask_entry.in_vlan_id,
-                            port, mask_entry.addr);
+                            0xf, mask_entry.addr);
        tbl_tcam_mcast.tbl_mcast_item_vld = 1;
        tbl_tcam_mcast.tbl_mcast_old_en = 0;
 
-       if (port < DSAF_SERVICE_NW_NUM) {
-               mskid = port;
-       } else if (port >= DSAF_BASE_INNER_PORT_NUM) {
-               mskid = port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
-       } else {
+       /* set MAC port to handle multicast */
+       mskid = hns_dsaf_get_port_id(port);
+       if (mskid == -EINVAL) {
                dev_err(dsaf_dev->dev, "%s,pnum(%d)error,key(%#x:%#x)\n",
                        dsaf_dev->ae_dev.name, port,
                        mask_key.high.val, mask_key.low.val);
                return;
        }
+       dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32],
+                    mskid % 32, 1);
 
+       /* set pool bit map to handle multicast */
+       mskid = hns_dsaf_get_port_id(port_num);
+       if (mskid == -EINVAL) {
+               dev_err(dsaf_dev->dev,
+                       "%s, pool bit map pnum(%d)error,key(%#x:%#x)\n",
+                       dsaf_dev->ae_dev.name, port_num,
+                       mask_key.high.val, mask_key.low.val);
+               return;
+       }
        dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32],
                     mskid % 32, 1);
+
        memcpy(&temp_key, &mask_key, sizeof(mask_key));
        hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc,
                                   (struct dsaf_tbl_tcam_data *)(&mask_key),
index 0e1cd99831a6083faa790aa80be1f6c635b15a50..76cc8887e1a83599c9178e88787ecb6c6a2b8784 100644 (file)
@@ -467,4 +467,6 @@ int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev,
                             u8 mac_id, u8 port_num);
 int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port);
 
+int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset);
+
 #endif /* __HNS_DSAF_MAIN_H__ */
index 16294cd3c95459891c65080cd49c61f839afaa60..19b94879691f86e0ec969e40ed5a057cb24c3d7b 100644 (file)
@@ -670,7 +670,7 @@ static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en)
                dsaf_set_field(origin, 1ull << 10, 10, en);
                dsaf_write_syscon(mac_cb->serdes_ctrl, reg_offset, origin);
        } else {
-               u8 *base_addr = (u8 *)mac_cb->serdes_vaddr +
+               u8 __iomem *base_addr = mac_cb->serdes_vaddr +
                                (mac_cb->mac_id <= 3 ? 0x00280000 : 0x00200000);
                dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, en);
        }
index 3d07c8a7639dad46c5b810b19f56ba84fdfc655a..17c019106e6e40d8f281deb87b1928367582e371 100644 (file)
@@ -61,7 +61,7 @@ void hns_ppe_set_indir_table(struct hns_ppe_cb *ppe_cb,
        }
 }
 
-static void __iomem *
+static u8 __iomem *
 hns_ppe_common_get_ioaddr(struct ppe_common_cb *ppe_common)
 {
        return ppe_common->dsaf_dev->ppe_base + PPE_COMMON_REG_OFFSET;
@@ -111,8 +111,8 @@ hns_ppe_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index)
        dsaf_dev->ppe_common[comm_index] = NULL;
 }
 
-static void __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common,
-                                       int ppe_idx)
+static u8 __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common,
+                                     int ppe_idx)
 {
        return ppe_common->dsaf_dev->ppe_base + ppe_idx * PPE_REG_OFFSET;
 }
index f670e63a5a018cd5b48b4a62093c104905aa4463..110c6e8222c7038a6eb24c1854490845293e320d 100644 (file)
@@ -80,7 +80,7 @@ struct hns_ppe_cb {
        struct hns_ppe_hw_stats hw_stats;
 
        u8 index;       /* index in a ppe common device */
-       void __iomem *io_base;
+       u8 __iomem *io_base;
        int virq;
        u32 rss_indir_table[HNS_PPEV2_RSS_IND_TBL_SIZE]; /*shadow indir tab */
        u32 rss_key[HNS_PPEV2_RSS_KEY_NUM]; /* rss hash key */
@@ -89,7 +89,7 @@ struct hns_ppe_cb {
 struct ppe_common_cb {
        struct device *dev;
        struct dsaf_device *dsaf_dev;
-       void __iomem *io_base;
+       u8 __iomem *io_base;
 
        enum ppe_common_mode ppe_mode;
 
index 6bf346c11b25a5c87bceca81abf1bfc83188d768..ac3518ca4d7bec5b3737cef78081202dfcf1f53d 100644 (file)
@@ -458,7 +458,7 @@ static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
                mdnum_ppkt = HNS_RCB_RING_MAX_BD_PER_PKT;
        } else {
                ring = &q->tx_ring;
-               ring->io_base = (u8 __iomem *)ring_pair_cb->q.io_base +
+               ring->io_base = ring_pair_cb->q.io_base +
                        HNS_RCB_TX_REG_OFFSET;
                irq_idx = HNS_RCB_IRQ_IDX_TX;
                mdnum_ppkt = is_ver1 ? HNS_RCB_RING_MAX_TXBD_PER_PKT :
@@ -764,7 +764,7 @@ static int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev)
        }
 }
 
-static void __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common)
+static u8 __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common)
 {
        struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;
 
index b9733b0b848263bc9a25ccf42f3d8b433a7b9e5e..b9e7f11f08968099c76a99c3f119e77a909c72ea 100644 (file)
 #define XGMAC_PAUSE_CTL_RSP_MODE_B     2
 #define XGMAC_PAUSE_CTL_TX_XOFF_B      3
 
-static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value)
+static inline void dsaf_write_reg(u8 __iomem *base, u32 reg, u32 value)
 {
        writel(value, base + reg);
 }
@@ -1053,7 +1053,7 @@ static inline int dsaf_read_syscon(struct regmap *base, u32 reg, u32 *val)
 #define dsaf_set_bit(origin, shift, val) \
        dsaf_set_field((origin), (1ull << (shift)), (shift), (val))
 
-static inline void dsaf_set_reg_field(void __iomem *base, u32 reg, u32 mask,
+static inline void dsaf_set_reg_field(u8 __iomem *base, u32 reg, u32 mask,
                                      u32 shift, u32 val)
 {
        u32 origin = dsaf_read_reg(base, reg);
@@ -1073,7 +1073,7 @@ static inline void dsaf_set_reg_field(void __iomem *base, u32 reg, u32 mask,
 #define dsaf_get_bit(origin, shift) \
        dsaf_get_field((origin), (1ull << (shift)), (shift))
 
-static inline u32 dsaf_get_reg_field(void __iomem *base, u32 reg, u32 mask,
+static inline u32 dsaf_get_reg_field(u8 __iomem *base, u32 reg, u32 mask,
                                     u32 shift)
 {
        u32 origin;
@@ -1089,11 +1089,11 @@ static inline u32 dsaf_get_reg_field(void __iomem *base, u32 reg, u32 mask,
        dsaf_get_reg_field((dev)->io_base, (reg), (1ull << (bit)), (bit))
 
 #define dsaf_write_b(addr, data)\
-       writeb((data), (__iomem unsigned char *)(addr))
+       writeb((data), (__iomem u8 *)(addr))
 #define dsaf_read_b(addr)\
-       readb((__iomem unsigned char *)(addr))
+       readb((__iomem u8 *)(addr))
 
 #define hns_mac_reg_read64(drv, offset) \
-       readq((__iomem void *)(((u8 *)(drv)->io_base + 0xc00 + (offset))))
+       readq((__iomem void *)(((drv)->io_base + 0xc00 + (offset))))
 
 #endif /* _DSAF_REG_H */
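
The void __iomem * to u8 __iomem * conversions running through these hns files keep sparse's address-space checking while making byte-offset arithmetic well-defined: arithmetic on void * is only a GCC extension, and the old code needed casts like the one removed in hns_rcb.c above. A userspace analogue of the pointer-type point (a plain buffer stands in for the register window; __iomem itself is a compile-time-only annotation with no runtime effect):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* a plain buffer standing in for a mapped register window */
static uint8_t regs[0x100];

/* readl/writel analogues: 32-bit access at a byte offset from base */
static void write32(uint8_t *base, uint32_t off, uint32_t val)
{
        memcpy(base + off, &val, sizeof(val));  /* byte math is well-defined on u8 * */
}

static uint32_t read32(uint8_t *base, uint32_t off)
{
        uint32_t val;

        memcpy(&val, base + off, sizeof(val));
        return val;
}

int main(void)
{
        uint8_t *io_base = regs;        /* with void *, base + off would rely on
                                         * a GCC extension; u8 * keeps the byte
                                         * arithmetic portable and type-checked */

        write32(io_base, 0x10, 0xdeadbeef);
        printf("reg 0x10 = %#x\n", read32(io_base, 0x10));
        return 0;
}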
index ba4316910dea1726da855c13b78e95bb6bd36a3c..a60f207768fc7152edbbf9a5394afaa080260d8b 100644 (file)
@@ -129,7 +129,7 @@ static void hns_xgmac_lf_rf_control_init(struct mac_driver *mac_drv)
        dsaf_set_bit(val, XGMAC_UNIDIR_EN_B, 0);
        dsaf_set_bit(val, XGMAC_RF_TX_EN_B, 1);
        dsaf_set_field(val, XGMAC_LF_RF_INSERT_M, XGMAC_LF_RF_INSERT_S, 0);
-       dsaf_write_reg(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val);
+       dsaf_write_dev(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val);
 }
 
 /**
index e37a0ca0db896b487f2fd544e4d6db87984ce275..297b95c1b3c1f5780d4c71915d380c46f83c8489 100644 (file)
@@ -29,9 +29,6 @@
 
 #define SERVICE_TIMER_HZ (1 * HZ)
 
-#define NIC_TX_CLEAN_MAX_NUM 256
-#define NIC_RX_CLEAN_MAX_NUM 64
-
 #define RCB_IRQ_NOT_INITED 0
 #define RCB_IRQ_INITED 1
 #define HNS_BUFFER_SIZE_2048 2048
@@ -376,8 +373,6 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
        wmb(); /* commit all data before submit */
        assert(skb->queue_mapping < priv->ae_handle->q_num);
        hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
-       ring->stats.tx_pkts++;
-       ring->stats.tx_bytes += skb->len;
 
        return NETDEV_TX_OK;
 
@@ -999,6 +994,9 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
                /* issue prefetch for next Tx descriptor */
                prefetch(&ring->desc_cb[ring->next_to_clean]);
        }
+       /* update tx ring statistics. */
+       ring->stats.tx_pkts += pkts;
+       ring->stats.tx_bytes += bytes;
 
        NETIF_TX_UNLOCK(ring);
 
@@ -2151,7 +2149,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
                        hns_nic_tx_fini_pro_v2;
 
                netif_napi_add(priv->netdev, &rd->napi,
-                              hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM);
+                              hns_nic_common_poll, NAPI_POLL_WEIGHT);
                rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
        }
        for (i = h->q_num; i < h->q_num * 2; i++) {
@@ -2164,7 +2162,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
                        hns_nic_rx_fini_pro_v2;
 
                netif_napi_add(priv->netdev, &rd->napi,
-                              hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM);
+                              hns_nic_common_poll, NAPI_POLL_WEIGHT);
                rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
        }
 
index 299b277bc7ae98294d0bf42aeee787e16f7beb8d..360463a40ba9d74f258506118322baa10ab31148 100644 (file)
@@ -43,6 +43,8 @@ enum HCLGE_MBX_OPCODE {
        HCLGE_MBX_GET_QID_IN_PF,        /* (VF -> PF) get queue id in pf */
        HCLGE_MBX_LINK_STAT_MODE,       /* (PF -> VF) link mode has changed */
        HCLGE_MBX_GET_LINK_MODE,        /* (VF -> PF) get the link mode of pf */
+       HLCGE_MBX_PUSH_VLAN_INFO,       /* (PF -> VF) push port base vlan */
+       HCLGE_MBX_GET_MEDIA_TYPE,       /* (VF -> PF) get media type */
 
        HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf reset status */
 };
@@ -62,6 +64,8 @@ enum hclge_mbx_vlan_cfg_subcode {
        HCLGE_MBX_VLAN_FILTER = 0,      /* set vlan filter */
        HCLGE_MBX_VLAN_TX_OFF_CFG,      /* set tx side vlan offload */
        HCLGE_MBX_VLAN_RX_OFF_CFG,      /* set rx side vlan offload */
+       HCLGE_MBX_PORT_BASE_VLAN_CFG,   /* set port based vlan configuration */
+       HCLGE_MBX_GET_PORT_BASE_VLAN_STATE,     /* get port based vlan state */
 };
 
 #define HCLGE_MBX_MAX_MSG_SIZE 16
index 17ab4f4af6ad52259e3d0311ed2c751271579af7..fa8b8506b120a46173d88206b0b469d5b39eaef5 100644 (file)
@@ -76,8 +76,8 @@ static int hnae3_get_client_init_flag(struct hnae3_client *client,
        return inited;
 }
 
-static int hnae3_match_n_instantiate(struct hnae3_client *client,
-                                    struct hnae3_ae_dev *ae_dev, bool is_reg)
+static int hnae3_init_client_instance(struct hnae3_client *client,
+                                     struct hnae3_ae_dev *ae_dev)
 {
        int ret;
 
@@ -87,23 +87,27 @@ static int hnae3_match_n_instantiate(struct hnae3_client *client,
                return 0;
        }
 
-       /* now, (un-)instantiate client by calling lower layer */
-       if (is_reg) {
-               ret = ae_dev->ops->init_client_instance(client, ae_dev);
-               if (ret)
-                       dev_err(&ae_dev->pdev->dev,
-                               "fail to instantiate client, ret = %d\n", ret);
+       ret = ae_dev->ops->init_client_instance(client, ae_dev);
+       if (ret)
+               dev_err(&ae_dev->pdev->dev,
+                       "fail to instantiate client, ret = %d\n", ret);
 
-               return ret;
-       }
+       return ret;
+}
+
+static void hnae3_uninit_client_instance(struct hnae3_client *client,
+                                        struct hnae3_ae_dev *ae_dev)
+{
+       /* check if this client matches the type of ae_dev */
+       if (!(hnae3_client_match(client->type, ae_dev->dev_type) &&
+             hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B)))
+               return;
 
        if (hnae3_get_client_init_flag(client, ae_dev)) {
                ae_dev->ops->uninit_client_instance(client, ae_dev);
 
                hnae3_set_client_init_flag(client, ae_dev, 0);
        }
-
-       return 0;
 }
 
 int hnae3_register_client(struct hnae3_client *client)
@@ -129,7 +133,7 @@ int hnae3_register_client(struct hnae3_client *client)
                /* if the client could not be initialized on current port, for
                 * any error reasons, move on to next available port
                 */
-               ret = hnae3_match_n_instantiate(client, ae_dev, true);
+               ret = hnae3_init_client_instance(client, ae_dev);
                if (ret)
                        dev_err(&ae_dev->pdev->dev,
                                "match and instantiation failed for port, ret = %d\n",
@@ -153,7 +157,7 @@ void hnae3_unregister_client(struct hnae3_client *client)
        mutex_lock(&hnae3_common_lock);
        /* un-initialize the client on every matched port */
        list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
-               hnae3_match_n_instantiate(client, ae_dev, false);
+               hnae3_uninit_client_instance(client, ae_dev);
        }
 
        list_del(&client->node);
@@ -205,7 +209,7 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
                 * initialize the figure out client instance
                 */
                list_for_each_entry(client, &hnae3_client_list, node) {
-                       ret = hnae3_match_n_instantiate(client, ae_dev, true);
+                       ret = hnae3_init_client_instance(client, ae_dev);
                        if (ret)
                                dev_err(&ae_dev->pdev->dev,
                                        "match and instantiation failed, ret = %d\n",
@@ -243,7 +247,7 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
                 * un-initialize the figure out client instance
                 */
                list_for_each_entry(client, &hnae3_client_list, node)
-                       hnae3_match_n_instantiate(client, ae_dev, false);
+                       hnae3_uninit_client_instance(client, ae_dev);
 
                ae_algo->ops->uninit_ae_dev(ae_dev);
                hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
@@ -301,7 +305,7 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
         * initialize the figure out client instance
         */
        list_for_each_entry(client, &hnae3_client_list, node) {
-               ret = hnae3_match_n_instantiate(client, ae_dev, true);
+               ret = hnae3_init_client_instance(client, ae_dev);
                if (ret)
                        dev_err(&ae_dev->pdev->dev,
                                "match and instantiation failed, ret = %d\n",
@@ -343,7 +347,7 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
                        continue;
 
                list_for_each_entry(client, &hnae3_client_list, node)
-                       hnae3_match_n_instantiate(client, ae_dev, false);
+                       hnae3_uninit_client_instance(client, ae_dev);
 
                ae_algo->ops->uninit_ae_dev(ae_dev);
                hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
index 38b430f11fc14f6d474faa4df7a28f2ef19fc287..681c1752c1e3cedb4f53953f7adffb96ec037421 100644 (file)
@@ -147,6 +147,13 @@ enum hnae3_flr_state {
        HNAE3_FLR_DONE,
 };
 
+enum hnae3_port_base_vlan_state {
+       HNAE3_PORT_BASE_VLAN_DISABLE,
+       HNAE3_PORT_BASE_VLAN_ENABLE,
+       HNAE3_PORT_BASE_VLAN_MODIFY,
+       HNAE3_PORT_BASE_VLAN_NOCHANGE,
+};
+
 struct hnae3_vector_info {
        u8 __iomem *io_addr;
        int vector;
@@ -578,6 +585,8 @@ struct hnae3_handle {
 
        u32 numa_node_mask;     /* for multi-chip support */
 
+       enum hnae3_port_base_vlan_state port_base_vlan_state;
+
        u8 netdev_flags;
        struct dentry *hnae3_dbgfs;
 };
index 21085c4bf66b99fc1e2b953e14ac7ccd55023930..923343858f514e6b84c9e9288ccfd617e7b88600 100644 (file)
@@ -963,6 +963,16 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb,
 {
 #define HNS3_TX_VLAN_PRIO_SHIFT 13
 
+       struct hnae3_handle *handle = tx_ring->tqp->handle;
+
+       /* Due to a HW limitation, if port-based VLAN insertion is enabled, only
+        * one VLAN header is allowed in the skb; otherwise it causes a RAS error.
+        */
+       if (unlikely(skb_vlan_tagged_multi(skb) &&
+                    handle->port_base_vlan_state ==
+                    HNAE3_PORT_BASE_VLAN_ENABLE))
+               return -EINVAL;
+
        if (skb->protocol == htons(ETH_P_8021Q) &&
            !(tx_ring->tqp->handle->kinfo.netdev->features &
            NETIF_F_HW_VLAN_CTAG_TX)) {
@@ -984,8 +994,16 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb,
                 * and use inner_vtag in one tag case.
                 */
                if (skb->protocol == htons(ETH_P_8021Q)) {
-                       hns3_set_field(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
-                       *out_vtag = vlan_tag;
+                       if (handle->port_base_vlan_state ==
+                           HNAE3_PORT_BASE_VLAN_DISABLE) {
+                               hns3_set_field(*out_vlan_flag,
+                                              HNS3_TXD_OVLAN_B, 1);
+                               *out_vtag = vlan_tag;
+                       } else {
+                               hns3_set_field(*inner_vlan_flag,
+                                              HNS3_TXD_VLAN_B, 1);
+                               *inner_vtag = vlan_tag;
+                       }
                } else {
                        hns3_set_field(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
                        *inner_vtag = vlan_tag;
@@ -1012,7 +1030,6 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
        struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
        struct hns3_desc *desc = &ring->desc[ring->next_to_use];
        struct device *dev = ring_to_dev(ring);
-       u16 bdtp_fe_sc_vld_ra_ri = 0;
        struct skb_frag_struct *frag;
        unsigned int frag_buf_num;
        int k, sizeoflast;
@@ -1080,12 +1097,30 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 
        desc_cb->length = size;
 
+       if (likely(size <= HNS3_MAX_BD_SIZE)) {
+               u16 bdtp_fe_sc_vld_ra_ri = 0;
+
+               desc_cb->priv = priv;
+               desc_cb->dma = dma;
+               desc_cb->type = type;
+               desc->addr = cpu_to_le64(dma);
+               desc->tx.send_size = cpu_to_le16(size);
+               hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
+               desc->tx.bdtp_fe_sc_vld_ra_ri =
+                       cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
+
+               ring_ptr_move_fw(ring, next_to_use);
+               return 0;
+       }
+
        frag_buf_num = hns3_tx_bd_count(size);
        sizeoflast = size & HNS3_TX_LAST_SIZE_M;
        sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
 
        /* When frag size is bigger than hardware limit, split this frag */
        for (k = 0; k < frag_buf_num; k++) {
+               u16 bdtp_fe_sc_vld_ra_ri = 0;
+
                /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
                desc_cb->priv = priv;
                desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k;
@@ -1574,6 +1609,9 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
        struct hnae3_handle *h = hns3_get_handle(netdev);
        int ret;
 
+       if (hns3_nic_resetting(netdev))
+               return -EBUSY;
+
        if (!h->ae_algo->ops->set_mtu)
                return -EOPNOTSUPP;
 
@@ -2293,17 +2331,50 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
        }
 }
 
+static int hns3_gro_complete(struct sk_buff *skb)
+{
+       __be16 type = skb->protocol;
+       struct tcphdr *th;
+       int depth = 0;
+
+       while (type == htons(ETH_P_8021Q)) {
+               struct vlan_hdr *vh;
+
+               if ((depth + VLAN_HLEN) > skb_headlen(skb))
+                       return -EFAULT;
+
+               vh = (struct vlan_hdr *)(skb->data + depth);
+               type = vh->h_vlan_encapsulated_proto;
+               depth += VLAN_HLEN;
+       }
+
+       if (type == htons(ETH_P_IP)) {
+               depth += sizeof(struct iphdr);
+       } else if (type == htons(ETH_P_IPV6)) {
+               depth += sizeof(struct ipv6hdr);
+       } else {
+               netdev_err(skb->dev,
+                          "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n",
+                          be16_to_cpu(type), depth);
+               return -EFAULT;
+       }
+
+       th = (struct tcphdr *)(skb->data + depth);
+       skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
+       if (th->cwr)
+               skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
+
+       skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+       return 0;
+}
+
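With tcp_gro_complete() gone, hns3_gro_complete() must locate the TCP header itself, so it first peels any number of stacked 802.1Q tags before sizing the L3 header. A self-contained sketch of that header walk over a raw buffer (packet bytes are hand-built; only the ethertype-chain logic mirrors the driver):

#include <stdint.h>
#include <stdio.h>

#define ETH_P_8021Q     0x8100
#define ETH_P_IP        0x0800
#define VLAN_HLEN       4

/* walk stacked VLAN tags starting at the first ethertype; returns the
 * byte offset where the L3 header begins, or -1 on error */
static int skip_vlans(const uint8_t *pkt, int len, uint16_t first_proto)
{
        uint16_t type = first_proto;
        int depth = 0;

        while (type == ETH_P_8021Q) {
                if (depth + VLAN_HLEN > len)
                        return -1;              /* truncated header chain */
                /* encapsulated proto lives in the last 2 bytes of the tag */
                type = (pkt[depth + 2] << 8) | pkt[depth + 3];
                depth += VLAN_HLEN;
        }
        return type == ETH_P_IP ? depth : -1;   /* only IPv4 in this sketch */
}

int main(void)
{
        /* one 802.1Q tag (TCI 0x0064) encapsulating IPv4 */
        uint8_t pkt[] = { 0x00, 0x64, 0x08, 0x00 };
        int off = skip_vlans(pkt, sizeof(pkt), ETH_P_8021Q);

        printf("IPv4 header starts %d bytes past the ethertype\n", off);
        return 0;
}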
 static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
-                            struct hns3_desc *desc)
+                            u32 l234info, u32 bd_base_info)
 {
        struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
        int l3_type, l4_type;
-       u32 bd_base_info;
        int ol4_type;
-       u32 l234info;
-
-       bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
-       l234info = le32_to_cpu(desc->rx.l234_info);
 
        skb->ip_summed = CHECKSUM_NONE;
 
@@ -2312,12 +2383,6 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
        if (!(netdev->features & NETIF_F_RXCSUM))
                return;
 
-       /* We MUST enable hardware checksum before enabling hardware GRO */
-       if (skb_shinfo(skb)->gso_size) {
-               skb->ip_summed = CHECKSUM_UNNECESSARY;
-               return;
-       }
-
        /* check if hardware has done checksum */
        if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
                return;
@@ -2370,6 +2435,7 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
                                struct hns3_desc *desc, u32 l234info,
                                u16 *vlan_tag)
 {
+       struct hnae3_handle *handle = ring->tqp->handle;
        struct pci_dev *pdev = ring->tqp->handle->pdev;
 
        if (pdev->revision == 0x20) {
@@ -2382,14 +2448,35 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
 
 #define HNS3_STRP_OUTER_VLAN   0x1
 #define HNS3_STRP_INNER_VLAN   0x2
+#define HNS3_STRP_BOTH         0x3
 
+       /* Hardware always inserts the VLAN tag into the RX descriptor when
+        * stripping it from the packet; the driver needs to determine which
+        * tag to report to the stack.
+        */
        switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
                                HNS3_RXD_STRP_TAGP_S)) {
        case HNS3_STRP_OUTER_VLAN:
+               if (handle->port_base_vlan_state !=
+                               HNAE3_PORT_BASE_VLAN_DISABLE)
+                       return false;
+
                *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
                return true;
        case HNS3_STRP_INNER_VLAN:
+               if (handle->port_base_vlan_state !=
+                               HNAE3_PORT_BASE_VLAN_DISABLE)
+                       return false;
+
                *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
+               return true;
+       case HNS3_STRP_BOTH:
+               if (handle->port_base_vlan_state ==
+                               HNAE3_PORT_BASE_VLAN_DISABLE)
+                       *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
+               else
+                       *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
+
                return true;
        default:
                return false;
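
With port-based VLAN in play, the stripped tag may be the port tag, which must never be reported to the stack. Condensed, the new decision is:

    strip case   port VLAN disabled    port VLAN enabled
    OUTER        report ot_vlan_tag    report nothing (port tag)
    INNER        report vlan_tag       report nothing (port tag)
    BOTH         report ot_vlan_tag    report vlan_tag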
@@ -2512,8 +2599,9 @@ static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
        return 0;
 }
 
-static void hns3_set_gro_param(struct sk_buff *skb, u32 l234info,
-                              u32 bd_base_info)
+static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring,
+                                    struct sk_buff *skb, u32 l234info,
+                                    u32 bd_base_info)
 {
        u16 gro_count;
        u32 l3_type;
@@ -2521,12 +2609,11 @@ static void hns3_set_gro_param(struct sk_buff *skb, u32 l234info,
        gro_count = hnae3_get_field(l234info, HNS3_RXD_GRO_COUNT_M,
                                    HNS3_RXD_GRO_COUNT_S);
        /* if there is no HW GRO, do not set gro params */
-       if (!gro_count)
-               return;
+       if (!gro_count) {
+               hns3_rx_checksum(ring, skb, l234info, bd_base_info);
+               return 0;
+       }
 
-       /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
-        * to skb_shinfo(skb)->gso_segs
-        */
        NAPI_GRO_CB(skb)->count = gro_count;
 
        l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
@@ -2536,13 +2623,13 @@ static void hns3_set_gro_param(struct sk_buff *skb, u32 l234info,
        else if (l3_type == HNS3_L3_TYPE_IPV6)
                skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
        else
-               return;
+               return -EFAULT;
 
        skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info,
                                                    HNS3_RXD_GRO_SIZE_M,
                                                    HNS3_RXD_GRO_SIZE_S);
-       if (skb_shinfo(skb)->gso_size)
-               tcp_gro_complete(skb);
+
+       return hns3_gro_complete(skb);
 }
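
The rewritten helper folds the old hns3_set_gro_param()/hns3_rx_checksum() pair into one decision. A condensed, self-contained sketch of the control flow; the struct and helpers here are hypothetical stand-ins:

    #include <errno.h>

    enum l3 { L3_IPV4, L3_IPV6, L3_OTHER };

    struct rx_ctx {                        /* hypothetical RX state */
        unsigned int gro_count, bd_gro_size, gso_segs, gso_size;
        enum l3 l3;
    };

    static int sw_rx_checksum(struct rx_ctx *c) { (void)c; return 0; }
    static int gro_complete(struct rx_ctx *c)   { (void)c; return 0; }

    static int set_gro_and_csum(struct rx_ctx *c)
    {
        if (!c->gro_count)                 /* no HW GRO on this packet */
            return sw_rx_checksum(c);      /* plain RX checksum path   */
        if (c->l3 != L3_IPV4 && c->l3 != L3_IPV6)
            return -EFAULT;                /* FW GRO is IPv4/IPv6 only */
        c->gso_segs = c->gro_count;        /* replaces tcp_gro_complete() */
        c->gso_size = c->bd_gro_size;
        return gro_complete(c);            /* patch TCP hdr, csum OK   */
    }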
 
 static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
@@ -2567,16 +2654,85 @@ static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
        skb_set_hash(skb, le32_to_cpu(desc->rx.rss_hash), rss_type);
 }
 
-static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
-                            struct sk_buff **out_skb)
+static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb,
+                             struct hns3_desc *desc)
 {
        struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
+       u32 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
+       u32 l234info = le32_to_cpu(desc->rx.l234_info);
        enum hns3_pkt_l2t_type l2_frame_type;
+       unsigned int len;
+       int ret;
+
+       /* Per the hardware's strategy, an offloaded tag is stored in
+        * ot_vlan_tag in the double-tagged case and in vlan_tag in the
+        * single-tagged case.
+        */
+       if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+               u16 vlan_tag;
+
+               if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
+                       __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+                                              vlan_tag);
+       }
+
+       if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) {
+               u64_stats_update_begin(&ring->syncp);
+               ring->stats.non_vld_descs++;
+               u64_stats_update_end(&ring->syncp);
+
+               return -EINVAL;
+       }
+
+       if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) |
+                                 BIT(HNS3_RXD_L2E_B))))) {
+               u64_stats_update_begin(&ring->syncp);
+               if (l234info & BIT(HNS3_RXD_L2E_B))
+                       ring->stats.l2_err++;
+               else
+                       ring->stats.err_pkt_len++;
+               u64_stats_update_end(&ring->syncp);
+
+               return -EFAULT;
+       }
+
+       len = skb->len;
+
+       /* Set skb->protocol before handing the skb up to the IP stack */
+       skb->protocol = eth_type_trans(skb, netdev);
+
+       /* This is needed in order to enable forwarding support */
+       ret = hns3_set_gro_and_checksum(ring, skb, l234info, bd_base_info);
+       if (unlikely(ret)) {
+               u64_stats_update_begin(&ring->syncp);
+               ring->stats.rx_err_cnt++;
+               u64_stats_update_end(&ring->syncp);
+               return ret;
+       }
+
+       l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M,
+                                       HNS3_RXD_DMAC_S);
+
+       u64_stats_update_begin(&ring->syncp);
+       ring->stats.rx_pkts++;
+       ring->stats.rx_bytes += len;
+
+       if (l2_frame_type == HNS3_L2_TYPE_MULTICAST)
+               ring->stats.rx_multicast++;
+
+       u64_stats_update_end(&ring->syncp);
+
+       ring->tqp_vector->rx_group.total_bytes += len;
+       return 0;
+}
+
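
Every counter bump in hns3_handle_bdinfo() sits inside u64_stats_update_begin()/end() so the 64-bit counters read coherently on 32-bit hosts. For reference, the matching reader follows the generic kernel pattern (not code from this diff):

    unsigned int start;
    u64 pkts, bytes;

    do {
        start = u64_stats_fetch_begin(&ring->syncp);
        pkts  = ring->stats.rx_pkts;
        bytes = ring->stats.rx_bytes;
    } while (u64_stats_fetch_retry(&ring->syncp, start));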
+static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
+                            struct sk_buff **out_skb)
+{
        struct sk_buff *skb = ring->skb;
        struct hns3_desc_cb *desc_cb;
        struct hns3_desc *desc;
        u32 bd_base_info;
-       u32 l234info;
        int length;
        int ret;
 
@@ -2636,62 +2792,12 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
                       ALIGN(ring->pull_len, sizeof(long)));
        }
 
-       l234info = le32_to_cpu(desc->rx.l234_info);
-       bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
-
-       /* Based on hw strategy, the tag offloaded will be stored at
-        * ot_vlan_tag in two layer tag case, and stored at vlan_tag
-        * in one layer tag case.
-        */
-       if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
-               u16 vlan_tag;
-
-               if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
-                       __vlan_hwaccel_put_tag(skb,
-                                              htons(ETH_P_8021Q),
-                                              vlan_tag);
-       }
-
-       if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) {
-               u64_stats_update_begin(&ring->syncp);
-               ring->stats.non_vld_descs++;
-               u64_stats_update_end(&ring->syncp);
-
+       ret = hns3_handle_bdinfo(ring, skb, desc);
+       if (unlikely(ret)) {
                dev_kfree_skb_any(skb);
-               return -EINVAL;
-       }
-
-       if (unlikely((!desc->rx.pkt_len) ||
-                    (l234info & (BIT(HNS3_RXD_TRUNCAT_B) |
-                                 BIT(HNS3_RXD_L2E_B))))) {
-               u64_stats_update_begin(&ring->syncp);
-               if (l234info & BIT(HNS3_RXD_L2E_B))
-                       ring->stats.l2_err++;
-               else
-                       ring->stats.err_pkt_len++;
-               u64_stats_update_end(&ring->syncp);
-
-               dev_kfree_skb_any(skb);
-               return -EFAULT;
+               return ret;
        }
 
-
-       l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M,
-                                       HNS3_RXD_DMAC_S);
-       u64_stats_update_begin(&ring->syncp);
-       if (l2_frame_type == HNS3_L2_TYPE_MULTICAST)
-               ring->stats.rx_multicast++;
-
-       ring->stats.rx_pkts++;
-       ring->stats.rx_bytes += skb->len;
-       u64_stats_update_end(&ring->syncp);
-
-       ring->tqp_vector->rx_group.total_bytes += skb->len;
-
-       /* This is needed in order to enable forwarding support */
-       hns3_set_gro_param(skb, l234info, bd_base_info);
-
-       hns3_rx_checksum(ring, skb, desc);
        *out_skb = skb;
        hns3_set_rx_skb_rss_type(ring, skb);
 
@@ -2703,7 +2809,6 @@ int hns3_clean_rx_ring(
                void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
 {
 #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
-       struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
        int recv_pkts, recv_bds, clean_count, err;
        int unused_count = hns3_desc_unused(ring) - ring->pending_buf;
        struct sk_buff *skb = ring->skb;
@@ -2740,8 +2845,6 @@ int hns3_clean_rx_ring(
                        continue;
                }
 
-               /* Do update ip stack process */
-               skb->protocol = eth_type_trans(skb, netdev);
                rx_fn(ring, skb);
                recv_bds += ring->pending_buf;
                clean_count += ring->pending_buf;
@@ -2891,7 +2994,7 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
        struct hns3_enet_tqp_vector *tqp_vector =
                container_of(napi, struct hns3_enet_tqp_vector, napi);
        bool clean_complete = true;
-       int rx_budget;
+       int rx_budget = budget;
 
        if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
                napi_complete(napi);
@@ -2905,7 +3008,8 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
                hns3_clean_tx_ring(ring);
 
        /* make sure rx ring budget not smaller than 1 */
-       rx_budget = max(budget / tqp_vector->num_tqps, 1);
+       if (tqp_vector->num_tqps > 1)
+               rx_budget = max(budget / tqp_vector->num_tqps, 1);
 
        hns3_for_each_ring(ring, tqp_vector->rx_group) {
                int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
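
Splitting the budget only when num_tqps > 1 lets a single-queue vector keep the whole NAPI budget instead of pointlessly dividing by one. A standalone check of the arithmetic:

    #include <assert.h>

    static int split_budget(int budget, int num_tqps)
    {
        int rx_budget = budget;

        if (num_tqps > 1)
            rx_budget = budget / num_tqps > 1 ? budget / num_tqps : 1;
        return rx_budget;
    }

    int main(void)
    {
        assert(split_budget(64, 1)   == 64); /* single queue: full budget */
        assert(split_budget(64, 4)   == 16);
        assert(split_budget(64, 128) == 1);  /* floor stays at one */
        return 0;
    }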
@@ -3697,13 +3801,13 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
        struct hns3_nic_priv *priv = netdev_priv(netdev);
        int ret;
 
-       hns3_client_stop(handle);
-
        hns3_remove_hw_addr(netdev);
 
        if (netdev->reg_state != NETREG_UNINITIALIZED)
                unregister_netdev(netdev);
 
+       hns3_client_stop(handle);
+
        if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
                netdev_warn(netdev, "already uninitialized\n");
                goto out_netdev_free;
@@ -3773,12 +3877,13 @@ static int hns3_recover_hw_addr(struct net_device *ndev)
        struct netdev_hw_addr *ha, *tmp;
        int ret = 0;
 
+       netif_addr_lock_bh(ndev);
        /* go through and sync uc_addr entries to the device */
        list = &ndev->uc;
        list_for_each_entry_safe(ha, tmp, &list->list, list) {
                ret = hns3_nic_uc_sync(ndev, ha->addr);
                if (ret)
-                       return ret;
+                       goto out;
        }
 
        /* go through and sync mc_addr entries to the device */
@@ -3786,9 +3891,11 @@ static int hns3_recover_hw_addr(struct net_device *ndev)
        list_for_each_entry_safe(ha, tmp, &list->list, list) {
                ret = hns3_nic_mc_sync(ndev, ha->addr);
                if (ret)
-                       return ret;
+                       goto out;
        }
 
+out:
+       netif_addr_unlock_bh(ndev);
        return ret;
 }
 
@@ -3799,6 +3906,7 @@ static void hns3_remove_hw_addr(struct net_device *netdev)
 
        hns3_nic_uc_unsync(netdev, netdev->dev_addr);
 
+       netif_addr_lock_bh(netdev);
        /* go through and unsync uc_addr entries to the device */
        list = &netdev->uc;
        list_for_each_entry_safe(ha, tmp, &list->list, list)
@@ -3809,6 +3917,8 @@ static void hns3_remove_hw_addr(struct net_device *netdev)
        list_for_each_entry_safe(ha, tmp, &list->list, list)
                if (ha->refcount > 1)
                        hns3_nic_mc_unsync(netdev, ha->addr);
+
+       netif_addr_unlock_bh(netdev);
 }
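
Both address walks now run under netif_addr_lock_bh(), so the uc/mc lists cannot change underneath list_for_each_entry_safe(). The shape is the standard netdev address-list locking, shown generically:

    struct netdev_hw_addr *ha, *tmp;

    netif_addr_lock_bh(ndev);
    list_for_each_entry_safe(ha, tmp, &ndev->uc.list, list) {
        /* ha->addr is stable while the addr lock is held */
        sync_one(ndev, ha->addr);          /* hypothetical per-entry op */
    }
    netif_addr_unlock_bh(ndev);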
 
 static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
@@ -3850,6 +3960,13 @@ static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
                ring_ptr_move_fw(ring, next_to_use);
        }
 
+       /* Free the pending skb in rx ring */
+       if (ring->skb) {
+               dev_kfree_skb_any(ring->skb);
+               ring->skb = NULL;
+               ring->pending_buf = 0;
+       }
+
        return 0;
 }
 
@@ -4048,10 +4165,18 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
        if (ret)
                goto err_uninit_vector;
 
+       ret = hns3_client_start(handle);
+       if (ret) {
+               dev_err(priv->dev, "hns3_client_start failed, ret = %d\n", ret);
+               goto err_uninit_ring;
+       }
+
        set_bit(HNS3_NIC_STATE_INITED, &priv->state);
 
        return ret;
 
+err_uninit_ring:
+       hns3_uninit_all_ring(priv);
 err_uninit_vector:
        hns3_nic_uninit_vector_data(priv);
        priv->ring_data = NULL;
@@ -4101,7 +4226,7 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
        struct hns3_nic_priv *priv = netdev_priv(netdev);
        int ret;
 
-       if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
+       if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
                netdev_warn(netdev, "already uninitialized\n");
                return 0;
        }
@@ -4123,8 +4248,6 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
        hns3_put_ring_config(priv);
        priv->ring_data = NULL;
 
-       clear_bit(HNS3_NIC_STATE_INITED, &priv->state);
-
        return ret;
 }
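
Switching to test_and_clear_bit() makes the "already uninitialized?" test and the flag clear a single atomic step, so two racing uninit calls can no longer both pass the check. Side by side, as a sketch:

    /* racy: both callers can observe the bit set and proceed */
    if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state))
        return 0;
    /* ... teardown ... */
    clear_bit(HNS3_NIC_STATE_INITED, &priv->state);

    /* atomic: exactly one caller wins the bit and does the teardown */
    if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state))
        return 0;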
 
index 75669cd0c31145fd763959f226175452dbb399bf..025d0f7f860dbd261692fdaacf38c57c90c95a56 100644 (file)
@@ -577,18 +577,13 @@ union l4_hdr_info {
        unsigned char *hdr;
 };
 
-/* the distance between [begin, end) in a ring buffer
- * note: there is a unuse slot between the begin and the end
- */
-static inline int ring_dist(struct hns3_enet_ring *ring, int begin, int end)
-{
-       return (end - begin + ring->desc_num) % ring->desc_num;
-}
-
 static inline int ring_space(struct hns3_enet_ring *ring)
 {
-       return ring->desc_num -
-               ring_dist(ring, ring->next_to_clean, ring->next_to_use) - 1;
+       int begin = ring->next_to_clean;
+       int end = ring->next_to_use;
+
+       return ((end >= begin) ? (ring->desc_num - end + begin) :
+                       (begin - end)) - 1;
 }
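
The open-coded ring_space() keeps the old semantics: one slot always stays unused, so next_to_clean == next_to_use can only mean "empty". A runnable check with desc_num = 8:

    #include <assert.h>

    static int ring_space(int desc_num, int begin, int end)
    {
        /* begin = next_to_clean, end = next_to_use */
        return ((end >= begin) ? (desc_num - end + begin)
                               : (begin - end)) - 1;
    }

    int main(void)
    {
        assert(ring_space(8, 2, 6) == 3);  /* 4 BDs in flight */
        assert(ring_space(8, 2, 1) == 0);  /* wrapped, effectively full */
        assert(ring_space(8, 5, 5) == 7);  /* empty: all but the spare slot */
        return 0;
    }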
 
 static inline int is_ring_empty(struct hns3_enet_ring *ring)
index 359d4731fb2db5c6a3bb112d8136669736533137..59ef272297abb5635ea5fba194bdebe1d7576c12 100644 (file)
@@ -483,6 +483,11 @@ static void hns3_get_stats(struct net_device *netdev,
        struct hnae3_handle *h = hns3_get_handle(netdev);
        u64 *p = data;
 
+       if (hns3_nic_resetting(netdev)) {
+               netdev_err(netdev, "dev resetting, could not get stats\n");
+               return;
+       }
+
        if (!h->ae_algo->ops->get_stats || !h->ae_algo->ops->update_stats) {
                netdev_err(netdev, "could not get any statistics\n");
                return;
@@ -648,6 +653,10 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
 static int hns3_set_link_ksettings(struct net_device *netdev,
                                   const struct ethtool_link_ksettings *cmd)
 {
+       /* Chip doesn't support this mode. */
+       if (cmd->base.speed == SPEED_1000 && cmd->base.duplex == DUPLEX_HALF)
+               return -EINVAL;
+
        /* Only support ksettings_set for netdev with phy attached for now */
        if (netdev->phydev)
                return phy_ethtool_ksettings_set(netdev->phydev, cmd);
index fffe8c1c45d394b2a0723443582d6427ffa6e387..0fb61d440d3bb96a1d5b24b4c5380b3b0c33de4e 100644 (file)
@@ -3,7 +3,7 @@
 # Makefile for the HISILICON network device drivers.
 #
 
-ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
+ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
 
 obj-$(CONFIG_HNS3_HCLGE) += hclge.o
 hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o  hclge_debugfs.o
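
A bare -Idrivers/... is resolved relative to the directory make runs in, which during an out-of-tree build (make O=builddir) is the object tree, where these headers do not exist. Prefixing the path with $(srctree) anchors it to the source tree, so the include works for both in-tree and O= builds.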
index 722bb3124bb69713bdc7e80a187bfa2dcc62bd2b..fbd904e3077cf6219b270df37a0058140a946784 100644 (file)
@@ -355,7 +355,7 @@ int hclge_cmd_init(struct hclge_dev *hdev)
        int ret;
 
        spin_lock_bh(&hdev->hw.cmq.csq.lock);
-       spin_lock_bh(&hdev->hw.cmq.crq.lock);
+       spin_lock(&hdev->hw.cmq.crq.lock);
 
        hdev->hw.cmq.csq.next_to_clean = 0;
        hdev->hw.cmq.csq.next_to_use = 0;
@@ -364,7 +364,7 @@ int hclge_cmd_init(struct hclge_dev *hdev)
 
        hclge_cmd_init_regs(&hdev->hw);
 
-       spin_unlock_bh(&hdev->hw.cmq.crq.lock);
+       spin_unlock(&hdev->hw.cmq.crq.lock);
        spin_unlock_bh(&hdev->hw.cmq.csq.lock);
 
        clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
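
Dropping the _bh suffix on the inner crq lock is safe because the surrounding csq spin_lock_bh() already has bottom halves disabled, and BH disabling nests by count; the plain spin_lock()/spin_unlock() simply avoids the redundant preempt-count churn. Annotated shape:

    spin_lock_bh(&csq_lock);               /* bottom halves go off here   */
    spin_lock(&crq_lock);                  /* BHs already off: plain lock */
    /* ... reinitialise both queues ... */
    spin_unlock(&crq_lock);
    spin_unlock_bh(&csq_lock);             /* BHs come back on once       */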
@@ -373,21 +373,26 @@ int hclge_cmd_init(struct hclge_dev *hdev)
         * reset may happen when lower level reset is being processed.
         */
        if ((hclge_is_reset_pending(hdev))) {
-               set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
-               return -EBUSY;
+               ret = -EBUSY;
+               goto err_cmd_init;
        }
 
        ret = hclge_cmd_query_firmware_version(&hdev->hw, &version);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "firmware version query failed %d\n", ret);
-               return ret;
+               goto err_cmd_init;
        }
        hdev->fw_version = version;
 
        dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);
 
        return 0;
+
+err_cmd_init:
+       set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+
+       return ret;
 }
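
Both failure legs of hclge_cmd_init() now unwind through a single err_cmd_init label that re-asserts HCLGE_STATE_CMD_DISABLE, the usual goto-based single-exit pattern. Generic sketch with hypothetical step names:

    ret = check_pending_reset();
    if (ret)
        goto err;
    ret = query_fw_version();
    if (ret)
        goto err;
    return 0;

    err:
    set_bit(STATE_CMD_DISABLE, &state);    /* leave the queue quiesced */
    return ret;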
 
 static void hclge_cmd_uninit_regs(struct hclge_hw *hw)
index 1f52d11f77b55c58d837918f384e172158caaca0..62ef1619143b5fc928a55a27164525135ca57f8f 100644 (file)
 #include "hclge_err.h"
 
 static const struct hclge_hw_error hclge_imp_tcm_ecc_int[] = {
-       { .int_msk = BIT(1), .msg = "imp_itcm0_ecc_mbit_err" },
-       { .int_msk = BIT(3), .msg = "imp_itcm1_ecc_mbit_err" },
-       { .int_msk = BIT(5), .msg = "imp_itcm2_ecc_mbit_err" },
-       { .int_msk = BIT(7), .msg = "imp_itcm3_ecc_mbit_err" },
-       { .int_msk = BIT(9), .msg = "imp_dtcm0_mem0_ecc_mbit_err" },
-       { .int_msk = BIT(11), .msg = "imp_dtcm0_mem1_ecc_mbit_err" },
-       { .int_msk = BIT(13), .msg = "imp_dtcm1_mem0_ecc_mbit_err" },
-       { .int_msk = BIT(15), .msg = "imp_dtcm1_mem1_ecc_mbit_err" },
-       { .int_msk = BIT(17), .msg = "imp_itcm4_ecc_mbit_err" },
+       { .int_msk = BIT(1), .msg = "imp_itcm0_ecc_mbit_err",
+         .reset_level = HNAE3_NONE_RESET },
+       { .int_msk = BIT(3), .msg = "imp_itcm1_ecc_mbit_err",
+         .reset_level = HNAE3_NONE_RESET },
+       { .int_msk = BIT(5), .msg = "imp_itcm2_ecc_mbit_err",
+         .reset_level = HNAE3_NONE_RESET },
+       { .int_msk = BIT(7), .msg = "imp_itcm3_ecc_mbit_err",
+         .reset_level = HNAE3_NONE_RESET },
+       { .int_msk = BIT(9), .msg = "imp_dtcm0_mem0_ecc_mbit_err",
+         .reset_level = HNAE3_NONE_RESET },
+       { .int_msk = BIT(11), .msg = "imp_dtcm0_mem1_ecc_mbit_err",
+         .reset_level = HNAE3_NONE_RESET },
+       { .int_msk = BIT(13), .msg = "imp_dtcm1_mem0_ecc_mbit_err",
+         .reset_level = HNAE3_NONE_RESET },
+       { .int_msk = BIT(15), .msg = "imp_dtcm1_mem1_ecc_mbit_err",
+         .reset_level = HNAE3_NONE_RESET },
+       { .int_msk = BIT(17), .msg = "imp_itcm4_ecc_mbit_err",
+         .reset_level = HNAE3_NONE_RESET },
        { /* sentinel */ }
 };
 
 static const struct hclge_hw_error hclge_cmdq_nic_mem_ecc_int[] = {
-       { .int_msk = BIT(1), .msg = "cmdq_nic_rx_depth_ecc_mbit_err" },
-       { .int_msk = BIT(3), .msg = "cmdq_nic_tx_depth_ecc_mbit_err" },
-       { .int_msk = BIT(5), .msg = "cmdq_nic_rx_tail_ecc_mbit_err" },
-       { .int_msk = BIT(7), .msg = "cmdq_nic_tx_tail_ecc_mbit_err" },
-       { .int_msk = BIT(9), .msg = "cmdq_nic_rx_head_ecc_mbit_err" },
-       { .int_msk = BIT(11), .msg = "cmdq_nic_tx_head_ecc_mbit_err" },
-       { .int_msk = BIT(13), .msg = "cmdq_nic_rx_addr_ecc_mbit_err" },
-       { .int_msk = BIT(15), .msg = "cmdq_nic_tx_addr_ecc_mbit_err" },
-       { .int_msk = BIT(17), .msg = "cmdq_rocee_rx_depth_ecc_mbit_err" },
-       { .int_msk = BIT(19), .msg = "cmdq_rocee_tx_depth_ecc_mbit_err" },
-       { .int_msk = BIT(21), .msg = "cmdq_rocee_rx_tail_ecc_mbit_err" },
-       { .int_msk = BIT(23), .msg = "cmdq_rocee_tx_tail_ecc_mbit_err" },
-       { .int_msk = BIT(25), .msg = "cmdq_rocee_rx_head_ecc_mbit_err" },
-       { .int_msk = BIT(27), .msg = "cmdq_rocee_tx_head_ecc_mbit_err" },
-       { .int_msk = BIT(29), .msg = "cmdq_rocee_rx_addr_ecc_mbit_err" },
-       { .int_msk = BIT(31), .msg = "cmdq_rocee_tx_addr_ecc_mbit_err" },
+       { .int_msk = BIT(1), .msg = "cmdq_nic_rx_depth_ecc_mbit_err",
+         .reset_level = HNAE3_NONE_RESET },
+       { .int_msk = BIT(3), .msg = "cmdq_nic_tx_depth_ecc_mbit_err",
+         .reset_level = HNAE3_NONE_RESET },
+       { .int_msk = BIT(5), .msg = "cmdq_nic_rx_tail_ecc_mbit_err",
+         .reset_level = HNAE3_NONE_RESET },
+       { .int_msk = BIT(7), .msg = "cmdq_nic_tx_tail_ecc_mbit_err",
+         .reset_level = HNAE3_NONE_RESET },
+       { .int_msk = BIT(9), .msg = "cmdq_nic_rx_head_ecc_mbit_err",
+         .reset_level = HNAE3_NONE_RESET },
+       { .int_msk = BIT(11), .msg = "cmdq_nic_tx_head_ecc_mbit_err",
+         .reset_level = HNAE3_NONE_RESET },
+       { .int_msk = BIT(13), .msg = "cmdq_nic_rx_addr_ecc_mbit_err",
+         .reset_level = HNAE3_NONE_RESET },
+       { .int_msk = BIT(15), .msg = "cmdq_nic_tx_addr_ecc_mbit_err",
+         .reset_level = HNAE3_NONE_RESET },
+       { .int_msk = BIT(17), .msg = "cmdq_rocee_rx_depth_ecc_mbit_err",
+         .reset_level = HNAE3_NONE_RESET },
+       { .int_msk = BIT(19), .msg = "cmdq_rocee_tx_depth_ecc_mbit_err",
+         .reset_level = HNAE3_NONE_RESET },
+       { .int_msk = BIT(21), .msg = "cmdq_rocee_rx_tail_ecc_mbit_err",
+         .reset_level = HNAE3_NONE_RESET },
+       { .int_msk = BIT(23), .msg = "cmdq_rocee_tx_tail_ecc_mbit_err",
+         .reset_level = HNAE3_NONE_RESET },
+       { .int_msk = BIT(25), .msg = "cmdq_rocee_rx_head_ecc_mbit_err",
+         .reset_level = HNAE3_NONE_RESET },
+       { .int_msk = BIT(27), .msg = "cmdq_rocee_tx_head_ecc_mbit_err",
+         .reset_level = HNAE3_NONE_RESET },
+       { .int_msk = BIT(29), .msg = "cmdq_rocee_rx_addr_ecc_mbit_err",
+         .reset_level = HNAE3_NONE_RESET },
+       { .int_msk = BIT(31), .msg = "cmdq_rocee_tx_addr_ecc_mbit_err",
+         .reset_level = HNAE3_NONE_RESET },
        { /* sentinel */ }
 };
 
 static const struct hclge_hw_error hclge_tqp_int_ecc_int[] = {
-       { .int_msk = BIT(6), .msg = "tqp_int_cfg_even_ecc_mbit_err" },
-       { .int_msk = BIT(7), .msg = "tqp_int_cfg_odd_ecc_mbit_err" },
-       { .int_msk = BIT(8), .msg = "tqp_int_ctrl_even_ecc_mbit_err" },
-       { .int_msk = BIT(9), .msg = "tqp_int_ctrl_odd_ecc_mbit_err" },
-       { .int_msk = BIT(10), .msg = "tx_que_scan_int_ecc_mbit_err" },
-       { .int_msk = BIT(11), .msg = "rx_que_scan_int_ecc_mbit_err" },
+       { .int_msk = BIT(6), .msg = "tqp_int_cfg_even_ecc_mbit_err",
+         .reset_level = HNAE3_NONE_RESET },
+       { .int_msk = BIT(7), .msg = "tqp_int_cfg_odd_ecc_mbit_err",
+         .reset_level = HNAE3_NONE_RESET },
+       { .int_msk = BIT(8), .msg = "tqp_int_ctrl_even_ecc_mbit_err",
+         .reset_level = HNAE3_NONE_RESET },
+       { .int_msk = BIT(9), .msg = "tqp_int_ctrl_odd_ecc_mbit_err",
+         .reset_level = HNAE3_NONE_RESET },
+       { .int_msk = BIT(10), .msg = "tx_que_scan_int_ecc_mbit_err",
+         .reset_level = HNAE3_NONE_RESET },
+       { .int_msk = BIT(11), .msg = "rx_que_scan_int_ecc_mbit_err",
+         .reset_level = HNAE3_NONE_RESET },
        { /* sentinel */ }
 };
 
 static const struct hclge_hw_error hclge_msix_sram_ecc_int[] = {
-       { .int_msk = BIT(1), .msg = "msix_nic_ecc_mbit_err" },
-       { .int_msk = BIT(3), .msg = "msix_rocee_ecc_mbit_err" },
+       { .int_msk = BIT(1), .msg = "msix_nic_ecc_mbit_err",
+         .reset_level = HNAE3_NONE_RESET },
+       { .int_msk = BIT(3), .msg = "msix_rocee_ecc_mbit_err",
+         .reset_level = HNAE3_NONE_RESET },
        { /* sentinel */ }
 };
 
 static const struct hclge_hw_error hclge_igu_int[] = {
-       { .int_msk = BIT(0), .msg = "igu_rx_buf0_ecc_mbit_err" },
-       { .int_msk = BIT(2), .msg = "igu_rx_buf1_ecc_mbit_err" },
+       { .int_msk = BIT(0), .msg = "igu_rx_buf0_ecc_mbit_err",
+         .reset_level = HNAE3_CORE_RESET },
+       { .int_msk = BIT(2), .msg = "igu_rx_buf1_ecc_mbit_err",
+         .reset_level = HNAE3_CORE_RESET },
        { /* sentinel */ }
 };
 
 static const struct hclge_hw_error hclge_igu_egu_tnl_int[] = {
-       { .int_msk = BIT(0), .msg = "rx_buf_overflow" },
-       { .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow" },
-       { .int_msk = BIT(2), .msg = "rx_stp_fifo_undeflow" },
-       { .int_msk = BIT(3), .msg = "tx_buf_overflow" },
-       { .int_msk = BIT(4), .msg = "tx_buf_underrun" },
-       { .int_msk = BIT(5), .msg = "rx_stp_buf_overflow" },
+       { .int_msk = BIT(0), .msg = "rx_buf_overflow",
+         .reset_level = HNAE3_CORE_RESET },
+       { .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow",
+         .reset_level = HNAE3_CORE_RESET },
+       { .int_msk = BIT(2), .msg = "rx_stp_fifo_undeflow",
+         .reset_level = HNAE3_CORE_RESET },
+       { .int_msk = BIT(3), .msg = "tx_buf_overflow",
+         .reset_level = HNAE3_CORE_RESET },
+       { .int_msk = BIT(4), .msg = "tx_buf_underrun",
+         .reset_level = HNAE3_CORE_RESET },
+       { .int_msk = BIT(5), .msg = "rx_stp_buf_overflow",
+         .reset_level = HNAE3_CORE_RESET },
        { /* sentinel */ }
 };
 
 static const struct hclge_hw_error hclge_ncsi_err_int[] = {
-       { .int_msk = BIT(1), .msg = "ncsi_tx_ecc_mbit_err" },
+       { .int_msk = BIT(1), .msg = "ncsi_tx_ecc_mbit_err",
+         .reset_level = HNAE3_NONE_RESET },
        { /* sentinel */ }
 };
 
 static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st1[] = {
-       { .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_mbit_err" },
-       { .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_mbit_err" },
-       { .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_mbit_err" },
-       { .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_mbit_err" },
-       { .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_mbit_err" },
-       { .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_mbit_err" },
-       { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_err" },
-       { .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_mbit_err" },
-       { .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_mbit_err" },
-       { .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_mbit_err" },
-       { .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_mbit_err" },
-       { .int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_mbit_err" },
-       { .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_mbit_err" },
-       { .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_mbit_err" },
-       { .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_mbit_err" },
-       { .int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_mbit_err" },
-       { .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_mbit_err" },
-       { .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_mbit_err" },
-       { .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_m1bit_err" },
-       { .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_mbit_err" },
-       { .int_msk = BIT(20), .msg = "rss_idt_mem12_ecc_mbit_err" },
-       { .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_mbit_err" },
-       { .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_mbit_err" },
-       { .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_mbit_err" },
-       { .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_mbit_err" },
-       { .int_msk = BIT(25), .msg = "mcast_linear_table_mem_ecc_mbit_err" },
-       { .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_mbit_err" },
-       { .int_msk = BIT(27),
-               .msg = "flow_director_ad_mem0_ecc_mbit_err" },
-       { .int_msk = BIT(28),
-               .msg = "flow_director_ad_mem1_ecc_mbit_err" },
-       { .int_msk = BIT(29),
-               .msg = "rx_vlan_tag_memory_ecc_mbit_err" },
-       { .int_msk = BIT(30),
-               .msg = "Tx_UP_mapping_config_mem_ecc_mbit_err" },
+       { .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_m1bit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(20), .msg = "rss_idt_mem12_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(25), .msg = "mcast_linear_table_mem_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(27), .msg = "flow_director_ad_mem0_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(28), .msg = "flow_director_ad_mem1_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(29), .msg = "rx_vlan_tag_memory_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(30), .msg = "Tx_UP_mapping_config_mem_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
        { /* sentinel */ }
 };
 
 static const struct hclge_hw_error hclge_ppp_pf_abnormal_int[] = {
-       { .int_msk = BIT(0), .msg = "tx_vlan_tag_err" },
-       { .int_msk = BIT(1), .msg = "rss_list_tc_unassigned_queue_err" },
+       { .int_msk = BIT(0), .msg = "tx_vlan_tag_err",
+         .reset_level = HNAE3_NONE_RESET },
+       { .int_msk = BIT(1), .msg = "rss_list_tc_unassigned_queue_err",
+         .reset_level = HNAE3_NONE_RESET },
        { /* sentinel */ }
 };
 
 static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st3[] = {
-       { .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_mbit_err" },
-       { .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_mbit_err" },
-       { .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_mbit_err" },
-       { .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_mbit_err" },
-       { .int_msk = BIT(4), .msg = "FD_CN1_memory_ecc_mbit_err" },
-       { .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_mbit_err" },
+       { .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(4), .msg = "FD_CN1_memory_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
        { /* sentinel */ }
 };
 
 static const struct hclge_hw_error hclge_tm_sch_rint[] = {
-       { .int_msk = BIT(1), .msg = "tm_sch_ecc_mbit_err" },
-       { .int_msk = BIT(2), .msg = "tm_sch_port_shap_sub_fifo_wr_err" },
-       { .int_msk = BIT(3), .msg = "tm_sch_port_shap_sub_fifo_rd_err" },
-       { .int_msk = BIT(4), .msg = "tm_sch_pg_pshap_sub_fifo_wr_err" },
-       { .int_msk = BIT(5), .msg = "tm_sch_pg_pshap_sub_fifo_rd_err" },
-       { .int_msk = BIT(6), .msg = "tm_sch_pg_cshap_sub_fifo_wr_err" },
-       { .int_msk = BIT(7), .msg = "tm_sch_pg_cshap_sub_fifo_rd_err" },
-       { .int_msk = BIT(8), .msg = "tm_sch_pri_pshap_sub_fifo_wr_err" },
-       { .int_msk = BIT(9), .msg = "tm_sch_pri_pshap_sub_fifo_rd_err" },
-       { .int_msk = BIT(10), .msg = "tm_sch_pri_cshap_sub_fifo_wr_err" },
-       { .int_msk = BIT(11), .msg = "tm_sch_pri_cshap_sub_fifo_rd_err" },
-       { .int_msk = BIT(12),
-         .msg = "tm_sch_port_shap_offset_fifo_wr_err" },
-       { .int_msk = BIT(13),
-         .msg = "tm_sch_port_shap_offset_fifo_rd_err" },
-       { .int_msk = BIT(14),
-         .msg = "tm_sch_pg_pshap_offset_fifo_wr_err" },
-       { .int_msk = BIT(15),
-         .msg = "tm_sch_pg_pshap_offset_fifo_rd_err" },
-       { .int_msk = BIT(16),
-         .msg = "tm_sch_pg_cshap_offset_fifo_wr_err" },
-       { .int_msk = BIT(17),
-         .msg = "tm_sch_pg_cshap_offset_fifo_rd_err" },
-       { .int_msk = BIT(18),
-         .msg = "tm_sch_pri_pshap_offset_fifo_wr_err" },
-       { .int_msk = BIT(19),
-         .msg = "tm_sch_pri_pshap_offset_fifo_rd_err" },
-       { .int_msk = BIT(20),
-         .msg = "tm_sch_pri_cshap_offset_fifo_wr_err" },
-       { .int_msk = BIT(21),
-         .msg = "tm_sch_pri_cshap_offset_fifo_rd_err" },
-       { .int_msk = BIT(22), .msg = "tm_sch_rq_fifo_wr_err" },
-       { .int_msk = BIT(23), .msg = "tm_sch_rq_fifo_rd_err" },
-       { .int_msk = BIT(24), .msg = "tm_sch_nq_fifo_wr_err" },
-       { .int_msk = BIT(25), .msg = "tm_sch_nq_fifo_rd_err" },
-       { .int_msk = BIT(26), .msg = "tm_sch_roce_up_fifo_wr_err" },
-       { .int_msk = BIT(27), .msg = "tm_sch_roce_up_fifo_rd_err" },
-       { .int_msk = BIT(28), .msg = "tm_sch_rcb_byte_fifo_wr_err" },
-       { .int_msk = BIT(29), .msg = "tm_sch_rcb_byte_fifo_rd_err" },
-       { .int_msk = BIT(30), .msg = "tm_sch_ssu_byte_fifo_wr_err" },
-       { .int_msk = BIT(31), .msg = "tm_sch_ssu_byte_fifo_rd_err" },
+       { .int_msk = BIT(1), .msg = "tm_sch_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(2), .msg = "tm_sch_port_shap_sub_fifo_wr_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(3), .msg = "tm_sch_port_shap_sub_fifo_rd_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(4), .msg = "tm_sch_pg_pshap_sub_fifo_wr_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(5), .msg = "tm_sch_pg_pshap_sub_fifo_rd_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(6), .msg = "tm_sch_pg_cshap_sub_fifo_wr_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(7), .msg = "tm_sch_pg_cshap_sub_fifo_rd_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(8), .msg = "tm_sch_pri_pshap_sub_fifo_wr_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(9), .msg = "tm_sch_pri_pshap_sub_fifo_rd_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(10), .msg = "tm_sch_pri_cshap_sub_fifo_wr_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(11), .msg = "tm_sch_pri_cshap_sub_fifo_rd_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(12), .msg = "tm_sch_port_shap_offset_fifo_wr_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(13), .msg = "tm_sch_port_shap_offset_fifo_rd_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(14), .msg = "tm_sch_pg_pshap_offset_fifo_wr_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(15), .msg = "tm_sch_pg_pshap_offset_fifo_rd_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(16), .msg = "tm_sch_pg_cshap_offset_fifo_wr_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(17), .msg = "tm_sch_pg_cshap_offset_fifo_rd_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(18), .msg = "tm_sch_pri_pshap_offset_fifo_wr_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(19), .msg = "tm_sch_pri_pshap_offset_fifo_rd_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(20), .msg = "tm_sch_pri_cshap_offset_fifo_wr_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(21), .msg = "tm_sch_pri_cshap_offset_fifo_rd_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(22), .msg = "tm_sch_rq_fifo_wr_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(23), .msg = "tm_sch_rq_fifo_rd_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(24), .msg = "tm_sch_nq_fifo_wr_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(25), .msg = "tm_sch_nq_fifo_rd_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(26), .msg = "tm_sch_roce_up_fifo_wr_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(27), .msg = "tm_sch_roce_up_fifo_rd_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(28), .msg = "tm_sch_rcb_byte_fifo_wr_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(29), .msg = "tm_sch_rcb_byte_fifo_rd_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(30), .msg = "tm_sch_ssu_byte_fifo_wr_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(31), .msg = "tm_sch_ssu_byte_fifo_rd_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
        { /* sentinel */ }
 };
 
 static const struct hclge_hw_error hclge_qcn_fifo_rint[] = {
-       { .int_msk = BIT(0), .msg = "qcn_shap_gp0_sch_fifo_rd_err" },
-       { .int_msk = BIT(1), .msg = "qcn_shap_gp0_sch_fifo_wr_err" },
-       { .int_msk = BIT(2), .msg = "qcn_shap_gp1_sch_fifo_rd_err" },
-       { .int_msk = BIT(3), .msg = "qcn_shap_gp1_sch_fifo_wr_err" },
-       { .int_msk = BIT(4), .msg = "qcn_shap_gp2_sch_fifo_rd_err" },
-       { .int_msk = BIT(5), .msg = "qcn_shap_gp2_sch_fifo_wr_err" },
-       { .int_msk = BIT(6), .msg = "qcn_shap_gp3_sch_fifo_rd_err" },
-       { .int_msk = BIT(7), .msg = "qcn_shap_gp3_sch_fifo_wr_err" },
-       { .int_msk = BIT(8), .msg = "qcn_shap_gp0_offset_fifo_rd_err" },
-       { .int_msk = BIT(9), .msg = "qcn_shap_gp0_offset_fifo_wr_err" },
-       { .int_msk = BIT(10), .msg = "qcn_shap_gp1_offset_fifo_rd_err" },
-       { .int_msk = BIT(11), .msg = "qcn_shap_gp1_offset_fifo_wr_err" },
-       { .int_msk = BIT(12), .msg = "qcn_shap_gp2_offset_fifo_rd_err" },
-       { .int_msk = BIT(13), .msg = "qcn_shap_gp2_offset_fifo_wr_err" },
-       { .int_msk = BIT(14), .msg = "qcn_shap_gp3_offset_fifo_rd_err" },
-       { .int_msk = BIT(15), .msg = "qcn_shap_gp3_offset_fifo_wr_err" },
-       { .int_msk = BIT(16), .msg = "qcn_byte_info_fifo_rd_err" },
-       { .int_msk = BIT(17), .msg = "qcn_byte_info_fifo_wr_err" },
+       { .int_msk = BIT(0), .msg = "qcn_shap_gp0_sch_fifo_rd_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(1), .msg = "qcn_shap_gp0_sch_fifo_wr_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(2), .msg = "qcn_shap_gp1_sch_fifo_rd_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(3), .msg = "qcn_shap_gp1_sch_fifo_wr_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(4), .msg = "qcn_shap_gp2_sch_fifo_rd_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(5), .msg = "qcn_shap_gp2_sch_fifo_wr_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(6), .msg = "qcn_shap_gp3_sch_fifo_rd_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(7), .msg = "qcn_shap_gp3_sch_fifo_wr_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(8), .msg = "qcn_shap_gp0_offset_fifo_rd_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(9), .msg = "qcn_shap_gp0_offset_fifo_wr_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(10), .msg = "qcn_shap_gp1_offset_fifo_rd_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(11), .msg = "qcn_shap_gp1_offset_fifo_wr_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(12), .msg = "qcn_shap_gp2_offset_fifo_rd_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(13), .msg = "qcn_shap_gp2_offset_fifo_wr_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(14), .msg = "qcn_shap_gp3_offset_fifo_rd_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(15), .msg = "qcn_shap_gp3_offset_fifo_wr_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(16), .msg = "qcn_byte_info_fifo_rd_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(17), .msg = "qcn_byte_info_fifo_wr_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
        { /* sentinel */ }
 };
 
 static const struct hclge_hw_error hclge_qcn_ecc_rint[] = {
-       { .int_msk = BIT(1), .msg = "qcn_byte_mem_ecc_mbit_err" },
-       { .int_msk = BIT(3), .msg = "qcn_time_mem_ecc_mbit_err" },
-       { .int_msk = BIT(5), .msg = "qcn_fb_mem_ecc_mbit_err" },
-       { .int_msk = BIT(7), .msg = "qcn_link_mem_ecc_mbit_err" },
-       { .int_msk = BIT(9), .msg = "qcn_rate_mem_ecc_mbit_err" },
-       { .int_msk = BIT(11), .msg = "qcn_tmplt_mem_ecc_mbit_err" },
-       { .int_msk = BIT(13), .msg = "qcn_shap_cfg_mem_ecc_mbit_err" },
-       { .int_msk = BIT(15), .msg = "qcn_gp0_barrel_mem_ecc_mbit_err" },
-       { .int_msk = BIT(17), .msg = "qcn_gp1_barrel_mem_ecc_mbit_err" },
-       { .int_msk = BIT(19), .msg = "qcn_gp2_barrel_mem_ecc_mbit_err" },
-       { .int_msk = BIT(21), .msg = "qcn_gp3_barral_mem_ecc_mbit_err" },
+       { .int_msk = BIT(1), .msg = "qcn_byte_mem_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(3), .msg = "qcn_time_mem_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(5), .msg = "qcn_fb_mem_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(7), .msg = "qcn_link_mem_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(9), .msg = "qcn_rate_mem_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(11), .msg = "qcn_tmplt_mem_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(13), .msg = "qcn_shap_cfg_mem_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(15), .msg = "qcn_gp0_barrel_mem_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(17), .msg = "qcn_gp1_barrel_mem_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(19), .msg = "qcn_gp2_barrel_mem_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(21), .msg = "qcn_gp3_barrel_mem_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
        { /* sentinel */ }
 };
 
 static const struct hclge_hw_error hclge_mac_afifo_tnl_int[] = {
-       { .int_msk = BIT(0), .msg = "egu_cge_afifo_ecc_1bit_err" },
-       { .int_msk = BIT(1), .msg = "egu_cge_afifo_ecc_mbit_err" },
-       { .int_msk = BIT(2), .msg = "egu_lge_afifo_ecc_1bit_err" },
-       { .int_msk = BIT(3), .msg = "egu_lge_afifo_ecc_mbit_err" },
-       { .int_msk = BIT(4), .msg = "cge_igu_afifo_ecc_1bit_err" },
-       { .int_msk = BIT(5), .msg = "cge_igu_afifo_ecc_mbit_err" },
-       { .int_msk = BIT(6), .msg = "lge_igu_afifo_ecc_1bit_err" },
-       { .int_msk = BIT(7), .msg = "lge_igu_afifo_ecc_mbit_err" },
-       { .int_msk = BIT(8), .msg = "cge_igu_afifo_overflow_err" },
-       { .int_msk = BIT(9), .msg = "lge_igu_afifo_overflow_err" },
-       { .int_msk = BIT(10), .msg = "egu_cge_afifo_underrun_err" },
-       { .int_msk = BIT(11), .msg = "egu_lge_afifo_underrun_err" },
-       { .int_msk = BIT(12), .msg = "egu_ge_afifo_underrun_err" },
-       { .int_msk = BIT(13), .msg = "ge_igu_afifo_overflow_err" },
+       { .int_msk = BIT(0), .msg = "egu_cge_afifo_ecc_1bit_err",
+         .reset_level = HNAE3_NONE_RESET },
+       { .int_msk = BIT(1), .msg = "egu_cge_afifo_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(2), .msg = "egu_lge_afifo_ecc_1bit_err",
+         .reset_level = HNAE3_NONE_RESET },
+       { .int_msk = BIT(3), .msg = "egu_lge_afifo_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(4), .msg = "cge_igu_afifo_ecc_1bit_err",
+         .reset_level = HNAE3_NONE_RESET },
+       { .int_msk = BIT(5), .msg = "cge_igu_afifo_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(6), .msg = "lge_igu_afifo_ecc_1bit_err",
+         .reset_level = HNAE3_NONE_RESET },
+       { .int_msk = BIT(7), .msg = "lge_igu_afifo_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(8), .msg = "cge_igu_afifo_overflow_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(9), .msg = "lge_igu_afifo_overflow_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(10), .msg = "egu_cge_afifo_underrun_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(11), .msg = "egu_lge_afifo_underrun_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(12), .msg = "egu_ge_afifo_underrun_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(13), .msg = "ge_igu_afifo_overflow_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
        { /* sentinel */ }
 };
 
 static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st2[] = {
-       { .int_msk = BIT(13), .msg = "rpu_rx_pkt_bit32_ecc_mbit_err" },
-       { .int_msk = BIT(14), .msg = "rpu_rx_pkt_bit33_ecc_mbit_err" },
-       { .int_msk = BIT(15), .msg = "rpu_rx_pkt_bit34_ecc_mbit_err" },
-       { .int_msk = BIT(16), .msg = "rpu_rx_pkt_bit35_ecc_mbit_err" },
-       { .int_msk = BIT(17), .msg = "rcb_tx_ring_ecc_mbit_err" },
-       { .int_msk = BIT(18), .msg = "rcb_rx_ring_ecc_mbit_err" },
-       { .int_msk = BIT(19), .msg = "rcb_tx_fbd_ecc_mbit_err" },
-       { .int_msk = BIT(20), .msg = "rcb_rx_ebd_ecc_mbit_err" },
-       { .int_msk = BIT(21), .msg = "rcb_tso_info_ecc_mbit_err" },
-       { .int_msk = BIT(22), .msg = "rcb_tx_int_info_ecc_mbit_err" },
-       { .int_msk = BIT(23), .msg = "rcb_rx_int_info_ecc_mbit_err" },
-       { .int_msk = BIT(24), .msg = "tpu_tx_pkt_0_ecc_mbit_err" },
-       { .int_msk = BIT(25), .msg = "tpu_tx_pkt_1_ecc_mbit_err" },
-       { .int_msk = BIT(26), .msg = "rd_bus_err" },
-       { .int_msk = BIT(27), .msg = "wr_bus_err" },
-       { .int_msk = BIT(28), .msg = "reg_search_miss" },
-       { .int_msk = BIT(29), .msg = "rx_q_search_miss" },
-       { .int_msk = BIT(30), .msg = "ooo_ecc_err_detect" },
-       { .int_msk = BIT(31), .msg = "ooo_ecc_err_multpl" },
+       { .int_msk = BIT(13), .msg = "rpu_rx_pkt_bit32_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(14), .msg = "rpu_rx_pkt_bit33_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(15), .msg = "rpu_rx_pkt_bit34_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(16), .msg = "rpu_rx_pkt_bit35_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(17), .msg = "rcb_tx_ring_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(18), .msg = "rcb_rx_ring_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(19), .msg = "rcb_tx_fbd_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(20), .msg = "rcb_rx_ebd_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(21), .msg = "rcb_tso_info_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(22), .msg = "rcb_tx_int_info_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(23), .msg = "rcb_rx_int_info_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(24), .msg = "tpu_tx_pkt_0_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(25), .msg = "tpu_tx_pkt_1_ecc_mbit_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(26), .msg = "rd_bus_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(27), .msg = "wr_bus_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(28), .msg = "reg_search_miss",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(29), .msg = "rx_q_search_miss",
+         .reset_level = HNAE3_NONE_RESET },
+       { .int_msk = BIT(30), .msg = "ooo_ecc_err_detect",
+         .reset_level = HNAE3_NONE_RESET },
+       { .int_msk = BIT(31), .msg = "ooo_ecc_err_multpl",
+         .reset_level = HNAE3_GLOBAL_RESET },
        { /* sentinel */ }
 };
 
 static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st3[] = {
-       { .int_msk = BIT(4), .msg = "gro_bd_ecc_mbit_err" },
-       { .int_msk = BIT(5), .msg = "gro_context_ecc_mbit_err" },
-       { .int_msk = BIT(6), .msg = "rx_stash_cfg_ecc_mbit_err" },
-       { .int_msk = BIT(7), .msg = "axi_rd_fbd_ecc_mbit_err" },
+       { .int_msk = BIT(4), .msg = "gro_bd_ecc_mbit_err",
+         .reset_level = HNAE3_CORE_RESET },
+       { .int_msk = BIT(5), .msg = "gro_context_ecc_mbit_err",
+         .reset_level = HNAE3_CORE_RESET },
+       { .int_msk = BIT(6), .msg = "rx_stash_cfg_ecc_mbit_err",
+         .reset_level = HNAE3_CORE_RESET },
+       { .int_msk = BIT(7), .msg = "axi_rd_fbd_ecc_mbit_err",
+         .reset_level = HNAE3_CORE_RESET },
        { /* sentinel */ }
 };
 
 static const struct hclge_hw_error hclge_ppu_pf_abnormal_int[] = {
-       { .int_msk = BIT(0), .msg = "over_8bd_no_fe" },
-       { .int_msk = BIT(1), .msg = "tso_mss_cmp_min_err" },
-       { .int_msk = BIT(2), .msg = "tso_mss_cmp_max_err" },
-       { .int_msk = BIT(3), .msg = "tx_rd_fbd_poison" },
-       { .int_msk = BIT(4), .msg = "rx_rd_ebd_poison" },
-       { .int_msk = BIT(5), .msg = "buf_wait_timeout" },
+       { .int_msk = BIT(0), .msg = "over_8bd_no_fe",
+         .reset_level = HNAE3_FUNC_RESET },
+       { .int_msk = BIT(1), .msg = "tso_mss_cmp_min_err",
+         .reset_level = HNAE3_NONE_RESET },
+       { .int_msk = BIT(2), .msg = "tso_mss_cmp_max_err",
+         .reset_level = HNAE3_NONE_RESET },
+       { .int_msk = BIT(3), .msg = "tx_rd_fbd_poison",
+         .reset_level = HNAE3_FUNC_RESET },
+       { .int_msk = BIT(4), .msg = "rx_rd_ebd_poison",
+         .reset_level = HNAE3_FUNC_RESET },
+       { .int_msk = BIT(5), .msg = "buf_wait_timeout",
+         .reset_level = HNAE3_NONE_RESET },
        { /* sentinel */ }
 };
 
 static const struct hclge_hw_error hclge_ssu_com_err_int[] = {
-       { .int_msk = BIT(0), .msg = "buf_sum_err" },
-       { .int_msk = BIT(1), .msg = "ppp_mb_num_err" },
-       { .int_msk = BIT(2), .msg = "ppp_mbid_err" },
-       { .int_msk = BIT(3), .msg = "ppp_rlt_mac_err" },
-       { .int_msk = BIT(4), .msg = "ppp_rlt_host_err" },
-       { .int_msk = BIT(5), .msg = "cks_edit_position_err" },
-       { .int_msk = BIT(6), .msg = "cks_edit_condition_err" },
-       { .int_msk = BIT(7), .msg = "vlan_edit_condition_err" },
-       { .int_msk = BIT(8), .msg = "vlan_num_ot_err" },
-       { .int_msk = BIT(9), .msg = "vlan_num_in_err" },
+       { .int_msk = BIT(0), .msg = "buf_sum_err",
+         .reset_level = HNAE3_NONE_RESET },
+       { .int_msk = BIT(1), .msg = "ppp_mb_num_err",
+         .reset_level = HNAE3_NONE_RESET },
+       { .int_msk = BIT(2), .msg = "ppp_mbid_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(3), .msg = "ppp_rlt_mac_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(4), .msg = "ppp_rlt_host_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(5), .msg = "cks_edit_position_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(6), .msg = "cks_edit_condition_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(7), .msg = "vlan_edit_condition_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(8), .msg = "vlan_num_ot_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(9), .msg = "vlan_num_in_err",
+         .reset_level = HNAE3_GLOBAL_RESET },
        { /* sentinel */ }
 };
 
 #define HCLGE_SSU_MEM_ECC_ERR(x) \
-       { .int_msk = BIT(x), .msg = "ssu_mem" #x "_ecc_mbit_err" }
+       { .int_msk = BIT(x), .msg = "ssu_mem" #x "_ecc_mbit_err", \
+         .reset_level = HNAE3_GLOBAL_RESET }
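
With the extra field in the initializer, HCLGE_SSU_MEM_ECC_ERR(3), for example, now expands to:

    { .int_msk = BIT(3), .msg = "ssu_mem3_ecc_mbit_err",
      .reset_level = HNAE3_GLOBAL_RESET }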
 
 static const struct hclge_hw_error hclge_ssu_mem_ecc_err_int[] = {
        HCLGE_SSU_MEM_ECC_ERR(0),
@@ -323,62 +504,106 @@ static const struct hclge_hw_error hclge_ssu_mem_ecc_err_int[] = {
 };
 
 static const struct hclge_hw_error hclge_ssu_port_based_err_int[] = {
-       { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port" },
-       { .int_msk = BIT(1), .msg = "tpu_pkt_without_key_port" },
-       { .int_msk = BIT(2), .msg = "igu_pkt_without_key_port" },
-       { .int_msk = BIT(3), .msg = "roc_eof_mis_match_port" },
-       { .int_msk = BIT(4), .msg = "tpu_eof_mis_match_port" },
-       { .int_msk = BIT(5), .msg = "igu_eof_mis_match_port" },
-       { .int_msk = BIT(6), .msg = "roc_sof_mis_match_port" },
-       { .int_msk = BIT(7), .msg = "tpu_sof_mis_match_port" },
-       { .int_msk = BIT(8), .msg = "igu_sof_mis_match_port" },
-       { .int_msk = BIT(11), .msg = "ets_rd_int_rx_port" },
-       { .int_msk = BIT(12), .msg = "ets_wr_int_rx_port" },
-       { .int_msk = BIT(13), .msg = "ets_rd_int_tx_port" },
-       { .int_msk = BIT(14), .msg = "ets_wr_int_tx_port" },
+       { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(1), .msg = "tpu_pkt_without_key_port",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(2), .msg = "igu_pkt_without_key_port",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(3), .msg = "roc_eof_mis_match_port",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(4), .msg = "tpu_eof_mis_match_port",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(5), .msg = "igu_eof_mis_match_port",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(6), .msg = "roc_sof_mis_match_port",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(7), .msg = "tpu_sof_mis_match_port",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(8), .msg = "igu_sof_mis_match_port",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(11), .msg = "ets_rd_int_rx_port",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(12), .msg = "ets_wr_int_rx_port",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(13), .msg = "ets_rd_int_tx_port",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(14), .msg = "ets_wr_int_tx_port",
+         .reset_level = HNAE3_GLOBAL_RESET },
        { /* sentinel */ }
 };
 
 static const struct hclge_hw_error hclge_ssu_fifo_overflow_int[] = {
-       { .int_msk = BIT(0), .msg = "ig_mac_inf_int" },
-       { .int_msk = BIT(1), .msg = "ig_host_inf_int" },
-       { .int_msk = BIT(2), .msg = "ig_roc_buf_int" },
-       { .int_msk = BIT(3), .msg = "ig_host_data_fifo_int" },
-       { .int_msk = BIT(4), .msg = "ig_host_key_fifo_int" },
-       { .int_msk = BIT(5), .msg = "tx_qcn_fifo_int" },
-       { .int_msk = BIT(6), .msg = "rx_qcn_fifo_int" },
-       { .int_msk = BIT(7), .msg = "tx_pf_rd_fifo_int" },
-       { .int_msk = BIT(8), .msg = "rx_pf_rd_fifo_int" },
-       { .int_msk = BIT(9), .msg = "qm_eof_fifo_int" },
-       { .int_msk = BIT(10), .msg = "mb_rlt_fifo_int" },
-       { .int_msk = BIT(11), .msg = "dup_uncopy_fifo_int" },
-       { .int_msk = BIT(12), .msg = "dup_cnt_rd_fifo_int" },
-       { .int_msk = BIT(13), .msg = "dup_cnt_drop_fifo_int" },
-       { .int_msk = BIT(14), .msg = "dup_cnt_wrb_fifo_int" },
-       { .int_msk = BIT(15), .msg = "host_cmd_fifo_int" },
-       { .int_msk = BIT(16), .msg = "mac_cmd_fifo_int" },
-       { .int_msk = BIT(17), .msg = "host_cmd_bitmap_empty_int" },
-       { .int_msk = BIT(18), .msg = "mac_cmd_bitmap_empty_int" },
-       { .int_msk = BIT(19), .msg = "dup_bitmap_empty_int" },
-       { .int_msk = BIT(20), .msg = "out_queue_bitmap_empty_int" },
-       { .int_msk = BIT(21), .msg = "bank2_bitmap_empty_int" },
-       { .int_msk = BIT(22), .msg = "bank1_bitmap_empty_int" },
-       { .int_msk = BIT(23), .msg = "bank0_bitmap_empty_int" },
+       { .int_msk = BIT(0), .msg = "ig_mac_inf_int",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(1), .msg = "ig_host_inf_int",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(2), .msg = "ig_roc_buf_int",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(3), .msg = "ig_host_data_fifo_int",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(4), .msg = "ig_host_key_fifo_int",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(5), .msg = "tx_qcn_fifo_int",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(6), .msg = "rx_qcn_fifo_int",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(7), .msg = "tx_pf_rd_fifo_int",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(8), .msg = "rx_pf_rd_fifo_int",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(9), .msg = "qm_eof_fifo_int",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(10), .msg = "mb_rlt_fifo_int",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(11), .msg = "dup_uncopy_fifo_int",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(12), .msg = "dup_cnt_rd_fifo_int",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(13), .msg = "dup_cnt_drop_fifo_int",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(14), .msg = "dup_cnt_wrb_fifo_int",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(15), .msg = "host_cmd_fifo_int",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(16), .msg = "mac_cmd_fifo_int",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(17), .msg = "host_cmd_bitmap_empty_int",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(18), .msg = "mac_cmd_bitmap_empty_int",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(19), .msg = "dup_bitmap_empty_int",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(20), .msg = "out_queue_bitmap_empty_int",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(21), .msg = "bank2_bitmap_empty_int",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(22), .msg = "bank1_bitmap_empty_int",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(23), .msg = "bank0_bitmap_empty_int",
+         .reset_level = HNAE3_GLOBAL_RESET },
        { /* sentinel */ }
 };
 
 static const struct hclge_hw_error hclge_ssu_ets_tcg_int[] = {
-       { .int_msk = BIT(0), .msg = "ets_rd_int_rx_tcg" },
-       { .int_msk = BIT(1), .msg = "ets_wr_int_rx_tcg" },
-       { .int_msk = BIT(2), .msg = "ets_rd_int_tx_tcg" },
-       { .int_msk = BIT(3), .msg = "ets_wr_int_tx_tcg" },
+       { .int_msk = BIT(0), .msg = "ets_rd_int_rx_tcg",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(1), .msg = "ets_wr_int_rx_tcg",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(2), .msg = "ets_rd_int_tx_tcg",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(3), .msg = "ets_wr_int_tx_tcg",
+         .reset_level = HNAE3_GLOBAL_RESET },
        { /* sentinel */ }
 };
 
 static const struct hclge_hw_error hclge_ssu_port_based_pf_int[] = {
-       { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port" },
-       { .int_msk = BIT(9), .msg = "low_water_line_err_port" },
-       { .int_msk = BIT(10), .msg = "hi_water_line_err_port" },
+       { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port",
+         .reset_level = HNAE3_GLOBAL_RESET },
+       { .int_msk = BIT(9), .msg = "low_water_line_err_port",
+         .reset_level = HNAE3_NONE_RESET },
+       { .int_msk = BIT(10), .msg = "hi_water_line_err_port",
+         .reset_level = HNAE3_GLOBAL_RESET },
        { /* sentinel */ }
 };
 
@@ -406,16 +631,29 @@ static const struct hclge_hw_error hclge_rocee_qmm_ovf_err_int[] = {
        { /* sentinel */ }
 };
 
-static void hclge_log_error(struct device *dev, char *reg,
-                           const struct hclge_hw_error *err,
-                           u32 err_sts)
+static enum hnae3_reset_type hclge_log_error(struct device *dev, char *reg,
+                                            const struct hclge_hw_error *err,
+                                            u32 err_sts)
 {
+       enum hnae3_reset_type reset_level = HNAE3_FUNC_RESET;
+       bool need_reset = false;
+
        while (err->msg) {
-               if (err->int_msk & err_sts)
+               if (err->int_msk & err_sts) {
                        dev_warn(dev, "%s %s found [error status=0x%x]\n",
                                 reg, err->msg, err_sts);
+                       if (err->reset_level != HNAE3_NONE_RESET &&
+                           err->reset_level >= reset_level) {
+                               reset_level = err->reset_level;
+                               need_reset = true;
+                       }
+               }
                err++;
        }
+       if (need_reset)
+               return reset_level;
+       else
+               return HNAE3_NONE_RESET;
 }
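
With this change hclge_log_error() no longer just prints: it walks the sentinel-terminated table and returns the most severe reset level requested by any error bit that is set, falling back to HNAE3_NONE_RESET when nothing demands a reset. A compilable distillation of that scan (types and names simplified; the enum order is assumed to encode severity, as it does in hnae3):

    #include <stdio.h>

    #define BIT(n) (1U << (n))

    enum reset_type { NONE_RESET, FUNC_RESET, GLOBAL_RESET };

    struct hw_error {
            unsigned int int_msk;
            const char *msg;
            enum reset_type reset_level;
    };

    /* Returns the highest reset level requested by any error bit set in
     * err_sts; NONE_RESET when no set bit asks for a reset. */
    static enum reset_type log_error(const struct hw_error *err,
                                     unsigned int err_sts)
    {
            enum reset_type level = FUNC_RESET;
            int need_reset = 0;

            for (; err->msg; err++) {
                    if (!(err->int_msk & err_sts))
                            continue;
                    printf("%s found [error status=0x%x]\n", err->msg, err_sts);
                    if (err->reset_level != NONE_RESET &&
                        err->reset_level >= level) {
                            level = err->reset_level;
                            need_reset = 1;
                    }
            }
            return need_reset ? level : NONE_RESET;
    }

    static const struct hw_error demo[] = {
            { .int_msk = BIT(0), .msg = "buf_sum_err", .reset_level = NONE_RESET },
            { .int_msk = BIT(2), .msg = "ppp_mbid_err", .reset_level = GLOBAL_RESET },
            { /* sentinel */ }
    };

    int main(void)
    {
            /* both bits set: GLOBAL_RESET wins over the no-reset entry */
            printf("level=%d\n", log_error(demo, BIT(0) | BIT(2)));
            return 0;
    }
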
 
 /* hclge_cmd_query_error: read the error information
@@ -826,6 +1064,7 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
                                      int num)
 {
        struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
+       enum hnae3_reset_type reset_level;
        struct device *dev = &hdev->pdev->dev;
        __le32 *desc_data;
        u32 status;
@@ -845,78 +1084,94 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
        /* log HNS common errors */
        status = le32_to_cpu(desc[0].data[0]);
        if (status) {
-               hclge_log_error(dev, "IMP_TCM_ECC_INT_STS",
-                               &hclge_imp_tcm_ecc_int[0], status);
-               HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
+               reset_level = hclge_log_error(dev, "IMP_TCM_ECC_INT_STS",
+                                             &hclge_imp_tcm_ecc_int[0],
+                                             status);
+               HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
        }
 
        status = le32_to_cpu(desc[0].data[1]);
        if (status) {
-               hclge_log_error(dev, "CMDQ_MEM_ECC_INT_STS",
-                               &hclge_cmdq_nic_mem_ecc_int[0], status);
-               HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
+               reset_level = hclge_log_error(dev, "CMDQ_MEM_ECC_INT_STS",
+                                             &hclge_cmdq_nic_mem_ecc_int[0],
+                                             status);
+               HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
        }
 
        if ((le32_to_cpu(desc[0].data[2])) & BIT(0)) {
                dev_warn(dev, "imp_rd_data_poison_err found\n");
-               HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
+               HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_NONE_RESET);
        }
 
        status = le32_to_cpu(desc[0].data[3]);
        if (status) {
-               hclge_log_error(dev, "TQP_INT_ECC_INT_STS",
-                               &hclge_tqp_int_ecc_int[0], status);
-               HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+               reset_level = hclge_log_error(dev, "TQP_INT_ECC_INT_STS",
+                                             &hclge_tqp_int_ecc_int[0],
+                                             status);
+               HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
        }
 
        status = le32_to_cpu(desc[0].data[4]);
        if (status) {
-               hclge_log_error(dev, "MSIX_ECC_INT_STS",
-                               &hclge_msix_sram_ecc_int[0], status);
-               HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+               reset_level = hclge_log_error(dev, "MSIX_ECC_INT_STS",
+                                             &hclge_msix_sram_ecc_int[0],
+                                             status);
+               HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
        }
 
        /* log SSU(Storage Switch Unit) errors */
        desc_data = (__le32 *)&desc[2];
        status = le32_to_cpu(*(desc_data + 2));
        if (status) {
-               hclge_log_error(dev, "SSU_ECC_MULTI_BIT_INT_0",
-                               &hclge_ssu_mem_ecc_err_int[0], status);
-               HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+               reset_level = hclge_log_error(dev, "SSU_ECC_MULTI_BIT_INT_0",
+                                             &hclge_ssu_mem_ecc_err_int[0],
+                                             status);
+               HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
        }
 
        status = le32_to_cpu(*(desc_data + 3)) & BIT(0);
        if (status) {
                dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_mem32_ecc_mbit_err found [error status=0x%x]\n",
                         status);
-               HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+               HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
        }
 
        status = le32_to_cpu(*(desc_data + 4)) & HCLGE_SSU_COMMON_ERR_INT_MASK;
        if (status) {
-               hclge_log_error(dev, "SSU_COMMON_ERR_INT",
-                               &hclge_ssu_com_err_int[0], status);
-               HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
+               reset_level = hclge_log_error(dev, "SSU_COMMON_ERR_INT",
+                                             &hclge_ssu_com_err_int[0],
+                                             status);
+               HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
        }
 
        /* log IGU(Ingress Unit) errors */
        desc_data = (__le32 *)&desc[3];
        status = le32_to_cpu(*desc_data) & HCLGE_IGU_INT_MASK;
-       if (status)
-               hclge_log_error(dev, "IGU_INT_STS",
-                               &hclge_igu_int[0], status);
+       if (status) {
+               reset_level = hclge_log_error(dev, "IGU_INT_STS",
+                                             &hclge_igu_int[0], status);
+               HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
+       }
 
        /* log PPP(Programmable Packet Process) errors */
        desc_data = (__le32 *)&desc[4];
        status = le32_to_cpu(*(desc_data + 1));
-       if (status)
-               hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST1",
-                               &hclge_ppp_mpf_abnormal_int_st1[0], status);
+       if (status) {
+               reset_level =
+                       hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST1",
+                                       &hclge_ppp_mpf_abnormal_int_st1[0],
+                                       status);
+               HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
+       }
 
        status = le32_to_cpu(*(desc_data + 3)) & HCLGE_PPP_MPF_INT_ST3_MASK;
-       if (status)
-               hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST3",
-                               &hclge_ppp_mpf_abnormal_int_st3[0], status);
+       if (status) {
+               reset_level =
+                       hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST3",
+                                       &hclge_ppp_mpf_abnormal_int_st3[0],
+                                       status);
+               HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
+       }
 
        /* log PPU(RCB) errors */
        desc_data = (__le32 *)&desc[5];
@@ -924,55 +1179,60 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
        if (status) {
                dev_warn(dev, "PPU_MPF_ABNORMAL_INT_ST1 %s found\n",
                         "rpu_rx_pkt_ecc_mbit_err");
-               HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+               HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
        }
 
        status = le32_to_cpu(*(desc_data + 2));
        if (status) {
-               hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2",
-                               &hclge_ppu_mpf_abnormal_int_st2[0], status);
-               HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+               reset_level =
+                       hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2",
+                                       &hclge_ppu_mpf_abnormal_int_st2[0],
+                                       status);
+               HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
        }
 
        status = le32_to_cpu(*(desc_data + 3)) & HCLGE_PPU_MPF_INT_ST3_MASK;
        if (status) {
-               hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST3",
-                               &hclge_ppu_mpf_abnormal_int_st3[0], status);
-               HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+               reset_level =
+                       hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST3",
+                                       &hclge_ppu_mpf_abnormal_int_st3[0],
+                                       status);
+               HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
        }
 
        /* log TM(Traffic Manager) errors */
        desc_data = (__le32 *)&desc[6];
        status = le32_to_cpu(*desc_data);
        if (status) {
-               hclge_log_error(dev, "TM_SCH_RINT",
-                               &hclge_tm_sch_rint[0], status);
-               HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+               reset_level = hclge_log_error(dev, "TM_SCH_RINT",
+                                             &hclge_tm_sch_rint[0], status);
+               HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
        }
 
        /* log QCN(Quantized Congestion Control) errors */
        desc_data = (__le32 *)&desc[7];
        status = le32_to_cpu(*desc_data) & HCLGE_QCN_FIFO_INT_MASK;
        if (status) {
-               hclge_log_error(dev, "QCN_FIFO_RINT",
-                               &hclge_qcn_fifo_rint[0], status);
-               HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+               reset_level = hclge_log_error(dev, "QCN_FIFO_RINT",
+                                             &hclge_qcn_fifo_rint[0], status);
+               HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
        }
 
        status = le32_to_cpu(*(desc_data + 1)) & HCLGE_QCN_ECC_INT_MASK;
        if (status) {
-               hclge_log_error(dev, "QCN_ECC_RINT",
-                               &hclge_qcn_ecc_rint[0], status);
-               HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+               reset_level = hclge_log_error(dev, "QCN_ECC_RINT",
+                                             &hclge_qcn_ecc_rint[0],
+                                             status);
+               HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
        }
 
        /* log NCSI errors */
        desc_data = (__le32 *)&desc[9];
        status = le32_to_cpu(*desc_data) & HCLGE_NCSI_ECC_INT_MASK;
        if (status) {
-               hclge_log_error(dev, "NCSI_ECC_INT_RPT",
-                               &hclge_ncsi_err_int[0], status);
-               HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+               reset_level = hclge_log_error(dev, "NCSI_ECC_INT_RPT",
+                                             &hclge_ncsi_err_int[0], status);
+               HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
        }
 
        /* clear all main PF RAS errors */
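
Each status word above now feeds the level returned by hclge_log_error() into HCLGE_SET_DEFAULT_RESET_REQUEST() instead of a hard-coded reset type, so severity is owned by the error tables rather than the call sites. A rough sketch of the record-then-recover pattern this implies; the helper below is illustrative, not the driver's macro, and treating a NONE_RESET request as a harmless no-op is an assumption of the sketch:

    #include <stdio.h>

    enum reset_type { NONE_RESET, FUNC_RESET, GLOBAL_RESET, RESET_MAX };

    static unsigned long reset_requests;

    /* Illustrative stand-in for HCLGE_SET_DEFAULT_RESET_REQUEST():
     * remember every level any decoder asked for; in this sketch a
     * NONE_RESET request is recorded but never acted on. */
    static void request_reset(enum reset_type level)
    {
            reset_requests |= 1UL << level;
    }

    int main(void)
    {
            request_reset(GLOBAL_RESET);
            request_reset(NONE_RESET);
            /* recovery later honors the most severe requested bit */
            for (int l = RESET_MAX - 1; l > NONE_RESET; l--) {
                    if (reset_requests & (1UL << l)) {
                            printf("perform reset level %d\n", l);
                            break;
                    }
            }
            return 0;
    }
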
@@ -1000,6 +1260,7 @@ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
 {
        struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
        struct device *dev = &hdev->pdev->dev;
+       enum hnae3_reset_type reset_level;
        __le32 *desc_data;
        u32 status;
        int ret;
@@ -1018,38 +1279,47 @@ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
        /* log SSU(Storage Switch Unit) errors */
        status = le32_to_cpu(desc[0].data[0]);
        if (status) {
-               hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
-                               &hclge_ssu_port_based_err_int[0], status);
-               HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
+               reset_level = hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
+                                             &hclge_ssu_port_based_err_int[0],
+                                             status);
+               HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
        }
 
        status = le32_to_cpu(desc[0].data[1]);
        if (status) {
-               hclge_log_error(dev, "SSU_FIFO_OVERFLOW_INT",
-                               &hclge_ssu_fifo_overflow_int[0], status);
-               HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
+               reset_level = hclge_log_error(dev, "SSU_FIFO_OVERFLOW_INT",
+                                             &hclge_ssu_fifo_overflow_int[0],
+                                             status);
+               HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
        }
 
        status = le32_to_cpu(desc[0].data[2]);
        if (status) {
-               hclge_log_error(dev, "SSU_ETS_TCG_INT",
-                               &hclge_ssu_ets_tcg_int[0], status);
-               HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
+               reset_level = hclge_log_error(dev, "SSU_ETS_TCG_INT",
+                                             &hclge_ssu_ets_tcg_int[0],
+                                             status);
+               HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
        }
 
        /* log IGU(Ingress Unit) EGU(Egress Unit) TNL errors */
        desc_data = (__le32 *)&desc[1];
        status = le32_to_cpu(*desc_data) & HCLGE_IGU_EGU_TNL_INT_MASK;
-       if (status)
-               hclge_log_error(dev, "IGU_EGU_TNL_INT_STS",
-                               &hclge_igu_egu_tnl_int[0], status);
+       if (status) {
+               reset_level = hclge_log_error(dev, "IGU_EGU_TNL_INT_STS",
+                                             &hclge_igu_egu_tnl_int[0],
+                                             status);
+               HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
+       }
 
        /* log PPU(RCB) errors */
        desc_data = (__le32 *)&desc[3];
        status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_RAS_MASK;
-       if (status)
-               hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST0",
-                               &hclge_ppu_pf_abnormal_int[0], status);
+       if (status) {
+               reset_level = hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST0",
+                                             &hclge_ppu_pf_abnormal_int[0],
+                                             status);
+               HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
+       }
 
        /* clear all PF RAS errors */
        hclge_cmd_reuse_desc(&desc[0], false);
@@ -1343,14 +1613,12 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
 {
        struct device *dev = &hdev->pdev->dev;
        u32 mpf_bd_num, pf_bd_num, bd_num;
+       enum hnae3_reset_type reset_level;
        struct hclge_desc desc_bd;
        struct hclge_desc *desc;
        __le32 *desc_data;
-       int ret = 0;
        u32 status;
-
-       /* set default handling */
-       set_bit(HNAE3_FUNC_RESET, reset_requests);
+       int ret;
 
        /* query the number of bds for the MSIx int status */
        hclge_cmd_setup_basic_desc(&desc_bd, HCLGE_QUERY_MSIX_INT_STS_BD_NUM,
@@ -1390,9 +1658,10 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
        desc_data = (__le32 *)&desc[1];
        status = le32_to_cpu(*desc_data);
        if (status) {
-               hclge_log_error(dev, "MAC_AFIFO_TNL_INT_R",
-                               &hclge_mac_afifo_tnl_int[0], status);
-               set_bit(HNAE3_GLOBAL_RESET, reset_requests);
+               reset_level = hclge_log_error(dev, "MAC_AFIFO_TNL_INT_R",
+                                             &hclge_mac_afifo_tnl_int[0],
+                                             status);
+               set_bit(reset_level, reset_requests);
        }
 
        /* log PPU(RCB) MPF errors */
@@ -1400,9 +1669,11 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
        status = le32_to_cpu(*(desc_data + 2)) &
                        HCLGE_PPU_MPF_INT_ST2_MSIX_MASK;
        if (status) {
-               hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2",
-                               &hclge_ppu_mpf_abnormal_int_st2[0], status);
-               set_bit(HNAE3_CORE_RESET, reset_requests);
+               reset_level =
+                       hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2",
+                                       &hclge_ppu_mpf_abnormal_int_st2[0],
+                                       status);
+               set_bit(reset_level, reset_requests);
        }
 
        /* clear all main PF MSIx errors */
@@ -1436,24 +1707,31 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
        /* log SSU PF errors */
        status = le32_to_cpu(desc[0].data[0]) & HCLGE_SSU_PORT_INT_MSIX_MASK;
        if (status) {
-               hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
-                               &hclge_ssu_port_based_pf_int[0], status);
-               set_bit(HNAE3_GLOBAL_RESET, reset_requests);
+               reset_level = hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
+                                             &hclge_ssu_port_based_pf_int[0],
+                                             status);
+               set_bit(reset_level, reset_requests);
        }
 
        /* read and log PPP PF errors */
        desc_data = (__le32 *)&desc[2];
        status = le32_to_cpu(*desc_data);
-       if (status)
-               hclge_log_error(dev, "PPP_PF_ABNORMAL_INT_ST0",
-                               &hclge_ppp_pf_abnormal_int[0], status);
+       if (status) {
+               reset_level = hclge_log_error(dev, "PPP_PF_ABNORMAL_INT_ST0",
+                                             &hclge_ppp_pf_abnormal_int[0],
+                                             status);
+               set_bit(reset_level, reset_requests);
+       }
 
        /* log PPU(RCB) PF errors */
        desc_data = (__le32 *)&desc[3];
        status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_MSIX_MASK;
-       if (status)
-               hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST",
-                               &hclge_ppu_pf_abnormal_int[0], status);
+       if (status) {
+               reset_level = hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST",
+                                             &hclge_ppu_pf_abnormal_int[0],
+                                             status);
+               set_bit(reset_level, reset_requests);
+       }
 
        /* clear all PF MSIx errors */
        hclge_cmd_reuse_desc(&desc[0], false);
index fc068280d3917f6ae893e78c1e95ac160844f3a8..4a2e82f7f1122cb8e0cc7e0f2c29e905f0b13e8b 100644 (file)
@@ -112,6 +112,7 @@ struct hclge_hw_blk {
 struct hclge_hw_error {
        u32 int_msk;
        const char *msg;
+       enum hnae3_reset_type reset_level;
 };
 
 int hclge_hw_error_set_state(struct hclge_dev *hdev, bool state);
index deda606c51e7359851026b3b370791bc6ea161b6..7dba3b448b8b1cf0acd7d4337fbdc63757590718 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/pci.h>
 #include <linux/platform_device.h>
 #include <linux/if_vlan.h>
+#include <linux/crash_dump.h>
 #include <net/rtnetlink.h>
 #include "hclge_cmd.h"
 #include "hclge_dcb.h"
@@ -31,6 +32,7 @@
 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
 static int hclge_init_vlan_config(struct hclge_dev *hdev);
 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
+static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
                               u16 *allocated_size, bool is_alloc);
 
@@ -1015,6 +1017,23 @@ static int hclge_get_cap(struct hclge_dev *hdev)
        return ret;
 }
 
+static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
+{
+#define HCLGE_MIN_TX_DESC      64
+#define HCLGE_MIN_RX_DESC      64
+
+       if (!is_kdump_kernel())
+               return;
+
+       dev_info(&hdev->pdev->dev,
+                "Running kdump kernel. Using minimal resources\n");
+
+       /* minimal number of queue pairs equals the number of vports */
+       hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
+       hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
+       hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
+}
+
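
hclge_init_kdump_kernel_config() follows the usual kernel pattern for the crash-capture environment: detect it with is_kdump_kernel() and clamp resources before any allocation happens. A user-space analogue of the clamp, with the kdump test faked by a stub (in the kernel the real helper reports whether the running kernel is the crash kernel):

    #include <stdbool.h>
    #include <stdio.h>

    #define MIN_TX_DESC 64
    #define MIN_RX_DESC 64

    struct dev_cfg {
            unsigned int num_tqps;
            unsigned int num_tx_desc;
            unsigned int num_rx_desc;
    };

    /* stand-in for is_kdump_kernel(), forced true for the demo */
    static bool fake_is_kdump_kernel(void) { return true; }

    static void init_kdump_config(struct dev_cfg *cfg, unsigned int num_vports)
    {
            if (!fake_is_kdump_kernel())
                    return;
            /* one queue pair per vport keeps the memory footprint minimal */
            cfg->num_tqps = num_vports;
            cfg->num_tx_desc = MIN_TX_DESC;
            cfg->num_rx_desc = MIN_RX_DESC;
    }

    int main(void)
    {
            struct dev_cfg cfg = { 1024, 8192, 8192 };

            init_kdump_config(&cfg, 9);
            printf("tqps=%u tx=%u rx=%u\n",
                   cfg.num_tqps, cfg.num_tx_desc, cfg.num_rx_desc);
            return 0;
    }
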
 static int hclge_configure(struct hclge_dev *hdev)
 {
        struct hclge_cfg cfg;
@@ -1074,6 +1093,8 @@ static int hclge_configure(struct hclge_dev *hdev)
 
        hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
 
+       hclge_init_kdump_kernel_config(hdev);
+
        return ret;
 }
 
@@ -1337,6 +1358,8 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
                vport->back = hdev;
                vport->vport_id = i;
                vport->mps = HCLGE_MAC_DEFAULT_FRAME;
+               vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
+               vport->rxvlan_cfg.rx_vlan_offload_en = true;
                INIT_LIST_HEAD(&vport->vlan_list);
                INIT_LIST_HEAD(&vport->uc_mac_list);
                INIT_LIST_HEAD(&vport->mc_mac_list);
@@ -1399,7 +1422,7 @@ static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
        return ret;
 }
 
-static int hclge_get_tc_num(struct hclge_dev *hdev)
+static u32 hclge_get_tc_num(struct hclge_dev *hdev)
 {
        int i, cnt = 0;
 
@@ -1409,17 +1432,6 @@ static int hclge_get_tc_num(struct hclge_dev *hdev)
        return cnt;
 }
 
-static int hclge_get_pfc_enalbe_num(struct hclge_dev *hdev)
-{
-       int i, cnt = 0;
-
-       for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
-               if (hdev->hw_tc_map & BIT(i) &&
-                   hdev->tm_info.hw_pfc_map & BIT(i))
-                       cnt++;
-       return cnt;
-}
-
 /* Get the number of pfc enabled TCs, which have private buffer */
 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
                                  struct hclge_pkt_buf_alloc *buf_alloc)
@@ -1483,14 +1495,12 @@ static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
                                struct hclge_pkt_buf_alloc *buf_alloc,
                                u32 rx_all)
 {
-       u32 shared_buf_min, shared_buf_tc, shared_std;
-       int tc_num, pfc_enable_num;
+       u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
+       u32 tc_num = hclge_get_tc_num(hdev);
        u32 shared_buf, aligned_mps;
        u32 rx_priv;
        int i;
 
-       tc_num = hclge_get_tc_num(hdev);
-       pfc_enable_num = hclge_get_pfc_enalbe_num(hdev);
        aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
 
        if (hnae3_dev_dcb_supported(hdev))
@@ -1499,9 +1509,7 @@ static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
                shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
                                        + hdev->dv_buf_size;
 
-       shared_buf_tc = pfc_enable_num * aligned_mps +
-                       (tc_num - pfc_enable_num) * aligned_mps / 2 +
-                       aligned_mps;
+       shared_buf_tc = tc_num * aligned_mps + aligned_mps;
        shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
                             HCLGE_BUF_SIZE_UNIT);
 
@@ -1518,19 +1526,26 @@ static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
        } else {
                buf_alloc->s_buf.self.high = aligned_mps +
                                                HCLGE_NON_DCB_ADDITIONAL_BUF;
-               buf_alloc->s_buf.self.low =
-                       roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
+               buf_alloc->s_buf.self.low = aligned_mps;
+       }
+
+       if (hnae3_dev_dcb_supported(hdev)) {
+               if (tc_num)
+                       hi_thrd = (shared_buf - hdev->dv_buf_size) / tc_num;
+               else
+                       hi_thrd = shared_buf - hdev->dv_buf_size;
+
+               hi_thrd = max_t(u32, hi_thrd, 2 * aligned_mps);
+               hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
+               lo_thrd = hi_thrd - aligned_mps / 2;
+       } else {
+               hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
+               lo_thrd = aligned_mps;
        }
 
        for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
-               if ((hdev->hw_tc_map & BIT(i)) &&
-                   (hdev->tm_info.hw_pfc_map & BIT(i))) {
-                       buf_alloc->s_buf.tc_thrd[i].low = aligned_mps;
-                       buf_alloc->s_buf.tc_thrd[i].high = 2 * aligned_mps;
-               } else {
-                       buf_alloc->s_buf.tc_thrd[i].low = 0;
-                       buf_alloc->s_buf.tc_thrd[i].high = aligned_mps;
-               }
+               buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
+               buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
        }
 
        return true;
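
The reworked shared-buffer math drops the PFC special-casing: every TC now gets an equal slice of the shared buffer (minus the dv buffer), floored at twice the aligned MPS and rounded down to the buffer unit, with the low threshold trailing by half an MPS. A worked numeric sketch of just that arithmetic (the input values are illustrative, not from any real configuration):

    #include <stdio.h>

    #define BUF_SIZE_UNIT 256U

    static unsigned int roundup_u(unsigned int x, unsigned int a)
    {
            return (x + a - 1) / a * a;
    }

    static unsigned int rounddown_u(unsigned int x, unsigned int a)
    {
            return x / a * a;
    }

    int main(void)
    {
            unsigned int shared_buf = 40960, dv_buf_size = 8192;
            unsigned int tc_num = 4, aligned_mps = roundup_u(1500, BUF_SIZE_UNIT);
            unsigned int hi, lo;

            /* per-TC share of what remains after the dv buffer */
            hi = (shared_buf - dv_buf_size) / tc_num;
            if (hi < 2 * aligned_mps)       /* max_t(u32, hi, 2 * aligned_mps) */
                    hi = 2 * aligned_mps;
            hi = rounddown_u(hi, BUF_SIZE_UNIT);
            lo = hi - aligned_mps / 2;

            /* prints aligned_mps=1536 hi=8192 lo=7424 */
            printf("aligned_mps=%u hi=%u lo=%u\n", aligned_mps, hi, lo);
            return 0;
    }
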
@@ -2143,7 +2158,8 @@ static int hclge_mac_init(struct hclge_dev *hdev)
 
 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
 {
-       if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
+       if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
+           !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
                schedule_work(&hdev->mbx_service_task);
 }
 
@@ -2657,7 +2673,7 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
                        return ret;
                }
 
-               if (!reset)
+               if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
                        continue;
 
                /* Inform VF to process the reset.
@@ -2694,9 +2710,18 @@ int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
 
 static void hclge_do_reset(struct hclge_dev *hdev)
 {
+       struct hnae3_handle *handle = &hdev->vport[0].nic;
        struct pci_dev *pdev = hdev->pdev;
        u32 val;
 
+       if (hclge_get_hw_reset_stat(handle)) {
+               dev_info(&pdev->dev, "Hardware reset has not finished\n");
+               dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
+                        hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
+                        hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
+               return;
+       }
+
        switch (hdev->reset_type) {
        case HNAE3_GLOBAL_RESET:
                val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
@@ -2775,6 +2800,10 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
                clear_bit(HNAE3_FLR_RESET, addr);
        }
 
+       if (hdev->reset_type != HNAE3_NONE_RESET &&
+           rst_level < hdev->reset_type)
+               return HNAE3_NONE_RESET;
+
        return rst_level;
 }
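
The added guard means a newly derived reset level that is strictly weaker than the reset already in progress is dropped, on the reasoning that the pending, more severe reset subsumes it. A compilable distillation of the precedence check (enum ordering is assumed to reflect severity):

    #include <stdio.h>

    enum reset_type { NONE_RESET, FUNC_RESET, GLOBAL_RESET };

    /* If a reset is already pending and the newly derived level is
     * weaker, report "nothing to do": the pending reset subsumes it. */
    static enum reset_type filter_level(enum reset_type pending,
                                        enum reset_type derived)
    {
            if (pending != NONE_RESET && derived < pending)
                    return NONE_RESET;
            return derived;
    }

    int main(void)
    {
            printf("%d\n", filter_level(GLOBAL_RESET, FUNC_RESET)); /* 0: dropped */
            printf("%d\n", filter_level(NONE_RESET, FUNC_RESET));   /* 1: kept */
            return 0;
    }
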
 
@@ -3002,6 +3031,7 @@ static void hclge_reset(struct hclge_dev *hdev)
        hdev->last_reset_time = jiffies;
        hdev->reset_fail_cnt = 0;
        ae_dev->reset_type = HNAE3_NONE_RESET;
+       del_timer(&hdev->reset_timer);
 
        return;
 
@@ -5942,8 +5972,11 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
        }
 
        /* check if we just hit the duplicate */
-       if (!ret)
-               ret = -EINVAL;
+       if (!ret) {
+               dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
+                        vport->vport_id, addr);
+               return 0;
+       }
 
        dev_err(&hdev->pdev->dev,
                "PF failed to add unicast entry(%pM) in the MAC table\n",
@@ -6293,7 +6326,8 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
                return -EINVAL;
        }
 
-       if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
+       if ((!is_first || is_kdump_kernel()) &&
+           hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
                dev_warn(&hdev->pdev->dev,
                         "remove old uc mac address fail.\n");
 
@@ -6543,30 +6577,6 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
        return ret;
 }
 
-int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
-                         u16 vlan_id, bool is_kill)
-{
-       struct hclge_vport *vport = hclge_get_vport(handle);
-       struct hclge_dev *hdev = vport->back;
-
-       return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id,
-                                       0, is_kill);
-}
-
-static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
-                                   u16 vlan, u8 qos, __be16 proto)
-{
-       struct hclge_vport *vport = hclge_get_vport(handle);
-       struct hclge_dev *hdev = vport->back;
-
-       if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
-               return -EINVAL;
-       if (proto != htons(ETH_P_8021Q))
-               return -EPROTONOSUPPORT;
-
-       return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false);
-}
-
 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
 {
        struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
@@ -6640,6 +6650,52 @@ static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
        return status;
 }
 
+static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
+                                 u16 port_base_vlan_state,
+                                 u16 vlan_tag)
+{
+       int ret;
+
+       if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
+               vport->txvlan_cfg.accept_tag1 = true;
+               vport->txvlan_cfg.insert_tag1_en = false;
+               vport->txvlan_cfg.default_tag1 = 0;
+       } else {
+               vport->txvlan_cfg.accept_tag1 = false;
+               vport->txvlan_cfg.insert_tag1_en = true;
+               vport->txvlan_cfg.default_tag1 = vlan_tag;
+       }
+
+       vport->txvlan_cfg.accept_untag1 = true;
+
+       /* accept_tag2 and accept_untag2 are not supported on
+        * pdev revision(0x20); newer revisions support them.
+        * These two fields cannot be configured by the user.
+        */
+       vport->txvlan_cfg.accept_tag2 = true;
+       vport->txvlan_cfg.accept_untag2 = true;
+       vport->txvlan_cfg.insert_tag2_en = false;
+       vport->txvlan_cfg.default_tag2 = 0;
+
+       if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
+               vport->rxvlan_cfg.strip_tag1_en = false;
+               vport->rxvlan_cfg.strip_tag2_en =
+                               vport->rxvlan_cfg.rx_vlan_offload_en;
+       } else {
+               vport->rxvlan_cfg.strip_tag1_en =
+                               vport->rxvlan_cfg.rx_vlan_offload_en;
+               vport->rxvlan_cfg.strip_tag2_en = true;
+       }
+       vport->rxvlan_cfg.vlan1_vlan_prionly = false;
+       vport->rxvlan_cfg.vlan2_vlan_prionly = false;
+
+       ret = hclge_set_vlan_tx_offload_cfg(vport);
+       if (ret)
+               return ret;
+
+       return hclge_set_vlan_rx_offload_cfg(vport);
+}
+
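
hclge_vlan_offload_cfg() flips a matched pair of knobs: with a port based VLAN active, TX inserts tag1 with the configured tag and RX strips it; with it disabled, tag1 belongs to the user and passes through. A reduced sketch of that pairing, with the config structs trimmed to the fields the function actually touches:

    #include <stdbool.h>
    #include <stdio.h>

    enum { PORT_VLAN_DISABLE, PORT_VLAN_ENABLE };

    struct tx_cfg { bool accept_tag1, insert_tag1_en; unsigned short default_tag1; };
    struct rx_cfg { bool strip_tag1_en, strip_tag2_en, rx_vlan_offload_en; };

    static void vlan_offload_cfg(int state, unsigned short tag,
                                 struct tx_cfg *tx, struct rx_cfg *rx)
    {
            if (state == PORT_VLAN_DISABLE) {
                    tx->accept_tag1 = true;         /* user tags pass through */
                    tx->insert_tag1_en = false;
                    tx->default_tag1 = 0;
                    rx->strip_tag1_en = false;
                    rx->strip_tag2_en = rx->rx_vlan_offload_en;
            } else {
                    tx->accept_tag1 = false;        /* hardware owns tag1 now */
                    tx->insert_tag1_en = true;
                    tx->default_tag1 = tag;
                    rx->strip_tag1_en = rx->rx_vlan_offload_en;
                    rx->strip_tag2_en = true;
            }
    }

    int main(void)
    {
            struct tx_cfg tx = { 0 };
            struct rx_cfg rx = { .rx_vlan_offload_en = true };

            vlan_offload_cfg(PORT_VLAN_ENABLE, 100, &tx, &rx);
            printf("insert=%d tag=%d strip1=%d\n",
                   tx.insert_tag1_en, tx.default_tag1, rx.strip_tag1_en);
            return 0;
    }
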
 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
 {
        struct hclge_rx_vlan_type_cfg_cmd *rx_req;
@@ -6730,34 +6786,14 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
                return ret;
 
        for (i = 0; i < hdev->num_alloc_vport; i++) {
-               vport = &hdev->vport[i];
-               vport->txvlan_cfg.accept_tag1 = true;
-               vport->txvlan_cfg.accept_untag1 = true;
+               u16 vlan_tag;
 
-               /* accept_tag2 and accept_untag2 are not supported on
-                * pdev revision(0x20), new revision support them. The
-                * value of this two fields will not return error when driver
-                * send command to fireware in revision(0x20).
-                * This two fields can not configured by user.
-                */
-               vport->txvlan_cfg.accept_tag2 = true;
-               vport->txvlan_cfg.accept_untag2 = true;
-
-               vport->txvlan_cfg.insert_tag1_en = false;
-               vport->txvlan_cfg.insert_tag2_en = false;
-               vport->txvlan_cfg.default_tag1 = 0;
-               vport->txvlan_cfg.default_tag2 = 0;
-
-               ret = hclge_set_vlan_tx_offload_cfg(vport);
-               if (ret)
-                       return ret;
-
-               vport->rxvlan_cfg.strip_tag1_en = false;
-               vport->rxvlan_cfg.strip_tag2_en = true;
-               vport->rxvlan_cfg.vlan1_vlan_prionly = false;
-               vport->rxvlan_cfg.vlan2_vlan_prionly = false;
+               vport = &hdev->vport[i];
+               vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
 
-               ret = hclge_set_vlan_rx_offload_cfg(vport);
+               ret = hclge_vlan_offload_cfg(vport,
+                                            vport->port_base_vlan_cfg.state,
+                                            vlan_tag);
                if (ret)
                        return ret;
        }
@@ -6765,7 +6801,8 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
        return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
 }
 
-void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id)
+static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
+                                      bool writen_to_tbl)
 {
        struct hclge_vport_vlan_cfg *vlan;
 
@@ -6777,14 +6814,38 @@ void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id)
        if (!vlan)
                return;
 
-       vlan->hd_tbl_status = true;
+       vlan->hd_tbl_status = writen_to_tbl;
        vlan->vlan_id = vlan_id;
 
        list_add_tail(&vlan->node, &vport->vlan_list);
 }
 
-void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
-                              bool is_write_tbl)
+static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
+{
+       struct hclge_vport_vlan_cfg *vlan, *tmp;
+       struct hclge_dev *hdev = vport->back;
+       int ret;
+
+       list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
+               if (!vlan->hd_tbl_status) {
+                       ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
+                                                      vport->vport_id,
+                                                      vlan->vlan_id, 0, false);
+                       if (ret) {
+                               dev_err(&hdev->pdev->dev,
+                                       "restore vport vlan list failed, ret=%d\n",
+                                       ret);
+                               return ret;
+                       }
+               }
+               vlan->hd_tbl_status = true;
+       }
+
+       return 0;
+}
+
+static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
+                                     bool is_write_tbl)
 {
        struct hclge_vport_vlan_cfg *vlan, *tmp;
        struct hclge_dev *hdev = vport->back;
@@ -6847,14 +6908,203 @@ int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
 {
        struct hclge_vport *vport = hclge_get_vport(handle);
 
-       vport->rxvlan_cfg.strip_tag1_en = false;
-       vport->rxvlan_cfg.strip_tag2_en = enable;
+       if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
+               vport->rxvlan_cfg.strip_tag1_en = false;
+               vport->rxvlan_cfg.strip_tag2_en = enable;
+       } else {
+               vport->rxvlan_cfg.strip_tag1_en = enable;
+               vport->rxvlan_cfg.strip_tag2_en = true;
+       }
        vport->rxvlan_cfg.vlan1_vlan_prionly = false;
        vport->rxvlan_cfg.vlan2_vlan_prionly = false;
+       vport->rxvlan_cfg.rx_vlan_offload_en = enable;
 
        return hclge_set_vlan_rx_offload_cfg(vport);
 }
 
+static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
+                                           u16 port_base_vlan_state,
+                                           struct hclge_vlan_info *new_info,
+                                           struct hclge_vlan_info *old_info)
+{
+       struct hclge_dev *hdev = vport->back;
+       int ret;
+
+       if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
+               hclge_rm_vport_all_vlan_table(vport, false);
+               return hclge_set_vlan_filter_hw(hdev,
+                                                htons(new_info->vlan_proto),
+                                                vport->vport_id,
+                                                new_info->vlan_tag,
+                                                new_info->qos, false);
+       }
+
+       ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
+                                      vport->vport_id, old_info->vlan_tag,
+                                      old_info->qos, true);
+       if (ret)
+               return ret;
+
+       return hclge_add_vport_all_vlan_table(vport);
+}
+
+int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
+                                   struct hclge_vlan_info *vlan_info)
+{
+       struct hnae3_handle *nic = &vport->nic;
+       struct hclge_vlan_info *old_vlan_info;
+       struct hclge_dev *hdev = vport->back;
+       int ret;
+
+       old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
+
+       ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
+       if (ret)
+               return ret;
+
+       if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
+               /* add new VLAN tag */
+               ret = hclge_set_vlan_filter_hw(hdev,
+                                              htons(vlan_info->vlan_proto),
+                                              vport->vport_id,
+                                              vlan_info->vlan_tag,
+                                              vlan_info->qos, false);
+               if (ret)
+                       return ret;
+
+               /* remove old VLAN tag */
+               ret = hclge_set_vlan_filter_hw(hdev,
+                                              htons(old_vlan_info->vlan_proto),
+                                              vport->vport_id,
+                                              old_vlan_info->vlan_tag,
+                                              old_vlan_info->qos, true);
+               if (ret)
+                       return ret;
+
+               goto update;
+       }
+
+       ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
+                                              old_vlan_info);
+       if (ret)
+               return ret;
+
+       /* update state only when disabling/enabling port based VLAN */
+       vport->port_base_vlan_cfg.state = state;
+       if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
+               nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
+       else
+               nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
+
+update:
+       vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
+       vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
+       vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
+
+       return 0;
+}
+
+static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
+                                         enum hnae3_port_base_vlan_state state,
+                                         u16 vlan)
+{
+       if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
+               if (!vlan)
+                       return HNAE3_PORT_BASE_VLAN_NOCHANGE;
+               else
+                       return HNAE3_PORT_BASE_VLAN_ENABLE;
+       } else {
+               if (!vlan)
+                       return HNAE3_PORT_BASE_VLAN_DISABLE;
+               else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
+                       return HNAE3_PORT_BASE_VLAN_NOCHANGE;
+               else
+                       return HNAE3_PORT_BASE_VLAN_MODIFY;
+       }
+}
+
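
hclge_get_port_base_vlan_state() is effectively a four-way decision table over (current state, requested tag). The sketch below tabulates the transitions, simplified to pass the currently configured tag explicitly instead of reading it from the vport:

    #include <stdio.h>

    enum { DISABLE, ENABLE, MODIFY, NOCHANGE };

    static int next_state(int cur_state, unsigned short cur_tag,
                          unsigned short new_tag)
    {
            if (cur_state == DISABLE)
                    return new_tag ? ENABLE : NOCHANGE;
            if (!new_tag)
                    return DISABLE;
            return new_tag == cur_tag ? NOCHANGE : MODIFY;
    }

    int main(void)
    {
            /* disabled + tag 0 -> no change; disabled + tag 5 -> enable;
             * enabled(5) + tag 5 -> no change; enabled(5) + tag 7 -> modify */
            printf("%d %d %d %d\n",
                   next_state(DISABLE, 0, 0), next_state(DISABLE, 0, 5),
                   next_state(ENABLE, 5, 5), next_state(ENABLE, 5, 7));
            return 0;
    }
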
+static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
+                                   u16 vlan, u8 qos, __be16 proto)
+{
+       struct hclge_vport *vport = hclge_get_vport(handle);
+       struct hclge_dev *hdev = vport->back;
+       struct hclge_vlan_info vlan_info;
+       u16 state;
+       int ret;
+
+       if (hdev->pdev->revision == 0x20)
+               return -EOPNOTSUPP;
+
+       /* qos is a 3-bit value, so it cannot be bigger than 7 */
+       if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
+               return -EINVAL;
+       if (proto != htons(ETH_P_8021Q))
+               return -EPROTONOSUPPORT;
+
+       vport = &hdev->vport[vfid];
+       state = hclge_get_port_base_vlan_state(vport,
+                                              vport->port_base_vlan_cfg.state,
+                                              vlan);
+       if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
+               return 0;
+
+       vlan_info.vlan_tag = vlan;
+       vlan_info.qos = qos;
+       vlan_info.vlan_proto = ntohs(proto);
+
+       /* update port based VLAN for PF */
+       if (!vfid) {
+               hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+               ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
+               hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+
+               return ret;
+       }
+
+       if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
+               return hclge_update_port_base_vlan_cfg(vport, state,
+                                                      &vlan_info);
+       } else {
+               ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
+                                                       (u8)vfid, state,
+                                                       vlan, qos,
+                                                       ntohs(proto));
+               return ret;
+       }
+}
+
+int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
+                         u16 vlan_id, bool is_kill)
+{
+       struct hclge_vport *vport = hclge_get_vport(handle);
+       struct hclge_dev *hdev = vport->back;
+       bool writen_to_tbl = false;
+       int ret = 0;
+
+       /* When port based VLAN is enabled, we use the port based VLAN as the
+        * VLAN filter entry. In this case, we don't update the VLAN filter
+        * table when the user adds a new VLAN or removes an existing one; we
+        * just update the vport VLAN list. The VLAN ids in the VLAN list
+        * won't be written to the VLAN filter table until port based VLAN is
+        * disabled.
+        */
+       if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
+               ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
+                                              vlan_id, 0, is_kill);
+               writen_to_tbl = true;
+       }
+
+       if (ret)
+               return ret;
+
+       if (is_kill)
+               hclge_rm_vport_vlan_table(vport, vlan_id, false);
+       else
+               hclge_add_vport_vlan_table(vport, vlan_id,
+                                          writen_to_tbl);
+
+       return 0;
+}
+
 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
 {
        struct hclge_config_max_frm_size_cmd *req;
@@ -7708,7 +7958,7 @@ static void hclge_reset_vport_state(struct hclge_dev *hdev)
        int i;
 
        for (i = 0; i < hdev->num_alloc_vport; i++) {
-               hclge_vport_start(vport);
+               hclge_vport_stop(vport);
                vport++;
        }
 }
index b57ac4beb31310fa05600ae9cc3b223e92fab400..e736030ac180ff4fbd7b47a8985f35cb063a7494 100644 (file)
@@ -807,10 +807,11 @@ struct hclge_tx_vtag_cfg {
 
 /* VPort level vlan tag configuration for RX direction */
 struct hclge_rx_vtag_cfg {
-       bool strip_tag1_en;     /* Whether strip inner vlan tag */
-       bool strip_tag2_en;     /* Whether strip outer vlan tag */
-       bool vlan1_vlan_prionly;/* Inner VLAN Tag up to descriptor Enable */
-       bool vlan2_vlan_prionly;/* Outer VLAN Tag up to descriptor Enable */
+       u8 rx_vlan_offload_en;  /* Whether enable rx vlan offload */
+       u8 strip_tag1_en;       /* Whether strip inner vlan tag */
+       u8 strip_tag2_en;       /* Whether strip outer vlan tag */
+       u8 vlan1_vlan_prionly;  /* Inner VLAN Tag up to descriptor Enable */
+       u8 vlan2_vlan_prionly;  /* Outer VLAN Tag up to descriptor Enable */
 };
 
 struct hclge_rss_tuple_cfg {
@@ -829,6 +830,17 @@ enum HCLGE_VPORT_STATE {
        HCLGE_VPORT_STATE_MAX
 };
 
+struct hclge_vlan_info {
+       u16 vlan_proto; /* so far support 802.1Q only */
+       u16 qos;
+       u16 vlan_tag;
+};
+
+struct hclge_port_base_vlan_config {
+       u16 state;
+       struct hclge_vlan_info vlan_info;
+};
+
 struct hclge_vport {
        u16 alloc_tqps; /* Allocated Tx/Rx queues */
 
@@ -842,9 +854,10 @@ struct hclge_vport {
        u16 alloc_rss_size;
 
        u16 qs_offset;
-       u16 bw_limit;           /* VSI BW Limit (0 = disabled) */
+       u32 bw_limit;           /* VSI BW Limit (0 = disabled) */
        u8  dwrr;
 
+       struct hclge_port_base_vlan_config port_base_vlan_cfg;
        struct hclge_tx_vtag_cfg  txvlan_cfg;
        struct hclge_rx_vtag_cfg  rxvlan_cfg;
 
@@ -924,9 +937,11 @@ void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
                                  enum HCLGE_MAC_ADDR_TYPE mac_type);
 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev);
-void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id);
-void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
-                              bool is_write_tbl);
 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list);
 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev);
+int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
+                                   struct hclge_vlan_info *vlan_info);
+int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
+                                     u16 state, u16 vlan_tag, u16 qos,
+                                     u16 vlan_proto);
 #endif
index 306a23e486de522c6f910d05904dd0e35f3259d5..24386bd894f713b4541bddb7062d7a0a93e1ff16 100644 (file)
@@ -289,9 +289,25 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
        return 0;
 }
 
+int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
+                                     u16 state, u16 vlan_tag, u16 qos,
+                                     u16 vlan_proto)
+{
+#define MSG_DATA_SIZE  8
+
+       u8 msg_data[MSG_DATA_SIZE];
+
+       memcpy(&msg_data[0], &state, sizeof(u16));
+       memcpy(&msg_data[2], &vlan_proto, sizeof(u16));
+       memcpy(&msg_data[4], &qos, sizeof(u16));
+       memcpy(&msg_data[6], &vlan_tag, sizeof(u16));
+
+       return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
+                                 HLCGE_MBX_PUSH_VLAN_INFO, vfid);
+}
+
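
The PF-to-VF push packs four u16 fields at fixed byte offsets into an 8-byte mailbox payload. A standalone pack/unpack round trip showing the layout; both sides are assumed to share byte order (PF and VF run on the same host), and the receiver struct is assumed padding-free, which holds for four u16s on common ABIs:

    #include <stdio.h>
    #include <string.h>

    /* mirror of the 8-byte payload: state, proto, qos, tag */
    struct vlan_msg { unsigned short state, proto, qos, tag; };

    static void pack(unsigned char msg[8], const struct vlan_msg *v)
    {
            memcpy(&msg[0], &v->state, sizeof(unsigned short));
            memcpy(&msg[2], &v->proto, sizeof(unsigned short));
            memcpy(&msg[4], &v->qos, sizeof(unsigned short));
            memcpy(&msg[6], &v->tag, sizeof(unsigned short));
    }

    int main(void)
    {
            struct vlan_msg v = { 1, 0x8100, 3, 100 }, out;
            unsigned char msg[8];

            pack(msg, &v);
            memcpy(&out, msg, sizeof(out));     /* receiver view, same layout */
            printf("state=%d proto=0x%x qos=%d tag=%d\n",
                   out.state, out.proto, out.qos, out.tag);
            return 0;
    }
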
 static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
-                                struct hclge_mbx_vf_to_pf_cmd *mbx_req,
-                                bool gen_resp)
+                                struct hclge_mbx_vf_to_pf_cmd *mbx_req)
 {
        int status = 0;
 
@@ -305,19 +321,27 @@ static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
                memcpy(&proto, &mbx_req->msg[5], sizeof(proto));
                status = hclge_set_vlan_filter(handle, cpu_to_be16(proto),
                                               vlan, is_kill);
-               if (!status)
-                       is_kill ? hclge_rm_vport_vlan_table(vport, vlan, false)
-                       : hclge_add_vport_vlan_table(vport, vlan);
        } else if (mbx_req->msg[1] == HCLGE_MBX_VLAN_RX_OFF_CFG) {
                struct hnae3_handle *handle = &vport->nic;
                bool en = mbx_req->msg[2] ? true : false;
 
                status = hclge_en_hw_strip_rxvtag(handle, en);
+       } else if (mbx_req->msg[1] == HCLGE_MBX_PORT_BASE_VLAN_CFG) {
+               struct hclge_vlan_info *vlan_info;
+               u16 *state;
+
+               state = (u16 *)&mbx_req->msg[2];
+               vlan_info = (struct hclge_vlan_info *)&mbx_req->msg[4];
+               status = hclge_update_port_base_vlan_cfg(vport, *state,
+                                                        vlan_info);
+       } else if (mbx_req->msg[1] == HCLGE_MBX_GET_PORT_BASE_VLAN_STATE) {
+               u8 state;
+
+               state = vport->port_base_vlan_cfg.state;
+               status = hclge_gen_resp_to_vf(vport, mbx_req, 0, &state,
+                                             sizeof(u8));
        }
 
-       if (gen_resp)
-               status = hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);
-
        return status;
 }
 
@@ -385,24 +409,32 @@ static int hclge_get_vf_queue_depth(struct hclge_vport *vport,
                                    HCLGE_TQPS_DEPTH_INFO_LEN);
 }
 
+static int hclge_get_vf_media_type(struct hclge_vport *vport,
+                                  struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+       struct hclge_dev *hdev = vport->back;
+       u8 resp_data;
+
+       resp_data = hdev->hw.mac.media_type;
+       return hclge_gen_resp_to_vf(vport, mbx_req, 0, &resp_data,
+                                   sizeof(resp_data));
+}
+
 static int hclge_get_link_info(struct hclge_vport *vport,
                               struct hclge_mbx_vf_to_pf_cmd *mbx_req)
 {
        struct hclge_dev *hdev = vport->back;
        u16 link_status;
-       u8 msg_data[10];
-       u16 media_type;
+       u8 msg_data[8];
        u8 dest_vfid;
        u16 duplex;
 
        /* mac.link can only be 0 or 1 */
        link_status = (u16)hdev->hw.mac.link;
        duplex = hdev->hw.mac.duplex;
-       media_type = hdev->hw.mac.media_type;
        memcpy(&msg_data[0], &link_status, sizeof(u16));
        memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32));
        memcpy(&msg_data[6], &duplex, sizeof(u16));
-       memcpy(&msg_data[8], &media_type, sizeof(u16));
        dest_vfid = mbx_req->mbx_src_vfid;
 
        /* send this requested info to VF */
@@ -579,7 +611,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
                                        ret);
                        break;
                case HCLGE_MBX_SET_VLAN:
-                       ret = hclge_set_vf_vlan_cfg(vport, req, false);
+                       ret = hclge_set_vf_vlan_cfg(vport, req);
                        if (ret)
                                dev_err(&hdev->pdev->dev,
                                        "PF failed(%d) to config VF's VLAN\n",
@@ -662,6 +694,13 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
                        hclge_rm_vport_all_vlan_table(vport, true);
                        mutex_unlock(&hdev->vport_cfg_mutex);
                        break;
+               case HCLGE_MBX_GET_MEDIA_TYPE:
+                       ret = hclge_get_vf_media_type(vport, req);
+                       if (ret)
+                               dev_err(&hdev->pdev->dev,
+                                       "PF failed(%d) to get media type for VF\n",
+                                       ret);
+                       break;
                default:
                        dev_err(&hdev->pdev->dev,
                                "un-supported mailbox message, code = %d\n",
index 48eda2c6fdae3e49eacb9094935c487743814418..12be4e293fcff2adffe5c0f7ab290a2e18f6f7e3 100644 (file)
@@ -121,12 +121,18 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum)
 
 int hclge_mac_mdio_config(struct hclge_dev *hdev)
 {
+#define PHY_INEXISTENT 255
+
        struct hclge_mac *mac = &hdev->hw.mac;
        struct phy_device *phydev;
        struct mii_bus *mdio_bus;
        int ret;
 
-       if (hdev->hw.mac.phy_addr >= PHY_MAX_ADDR) {
+       if (hdev->hw.mac.phy_addr == PHY_INEXISTENT) {
+               dev_info(&hdev->pdev->dev,
+                        "no phy device is connected to mdio bus\n");
+               return 0;
+       } else if (hdev->hw.mac.phy_addr >= PHY_MAX_ADDR) {
                dev_err(&hdev->pdev->dev, "phy_addr(%d) is too large.\n",
                        hdev->hw.mac.phy_addr);
                return -EINVAL;
index fb93bbd358455a735880d6e83cd314c0773a636a..6193f8fa7cf34aa142f575ca903f42d666847f31 100644 (file)
@@ -3,7 +3,7 @@
 # Makefile for the HISILICON network device drivers.
 #
 
-ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
+ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
 
 obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o
 hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o
\ No newline at end of file
index 9441b453d38dfa9ac382e3c9f0e330aed09ab445..1b428d4a1132b22f7f40f2cc9037fd95185522c4 100644 (file)
@@ -27,26 +27,39 @@ static int hclgevf_ring_space(struct hclgevf_cmq_ring *ring)
        return ring->desc_num - used - 1;
 }
 
+static int hclgevf_is_valid_csq_clean_head(struct hclgevf_cmq_ring *ring,
+                                          int head)
+{
+       int ntu = ring->next_to_use;
+       int ntc = ring->next_to_clean;
+
+       if (ntu > ntc)
+               return head >= ntc && head <= ntu;
+
+       return head >= ntc || head <= ntu;
+}
+
 static int hclgevf_cmd_csq_clean(struct hclgevf_hw *hw)
 {
+       struct hclgevf_dev *hdev = container_of(hw, struct hclgevf_dev, hw);
        struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
-       u16 ntc = csq->next_to_clean;
-       struct hclgevf_desc *desc;
        int clean = 0;
        u32 head;
 
-       desc = &csq->desc[ntc];
        head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);
-       while (head != ntc) {
-               memset(desc, 0, sizeof(*desc));
-               ntc++;
-               if (ntc == csq->desc_num)
-                       ntc = 0;
-               desc = &csq->desc[ntc];
-               clean++;
+       rmb(); /* Make sure head is ready before touching any data */
+
+       if (!hclgevf_is_valid_csq_clean_head(csq, head)) {
+               dev_warn(&hdev->pdev->dev, "wrong cmd head (%d, %d-%d)\n", head,
+                        csq->next_to_use, csq->next_to_clean);
+               dev_warn(&hdev->pdev->dev,
+                        "Disabling any further commands to IMP firmware\n");
+               set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+               return -EIO;
        }
-       csq->next_to_clean = ntc;
 
+       clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
+       csq->next_to_clean = head;
        return clean;
 }
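
The rewritten cleanup above trusts the hardware head pointer only after checking that it falls inside the ring's live window (allowing for wrap-around), then derives the cleaned count with modular arithmetic instead of zeroing descriptors one by one. A standalone sketch of both checks, with plain ints standing in for the ring fields:

    #include <assert.h>

    /* head is valid iff it lies in [next_to_clean, next_to_use], modulo wrap */
    static int csq_head_is_valid(int ntu, int ntc, int head)
    {
            if (ntu > ntc)
                    return head >= ntc && head <= ntu;
            return head >= ntc || head <= ntu;  /* live window wraps past the end */
    }

    /* descriptors consumed when next_to_clean jumps straight to head */
    static int csq_clean_count(int desc_num, int ntc, int head)
    {
            return (head - ntc + desc_num) % desc_num;
    }

    int main(void)
    {
            /* 64-entry ring with a wrapped window: ntc = 60, ntu = 4 */
            assert(csq_head_is_valid(4, 60, 62));    /* inside, before the wrap */
            assert(csq_head_is_valid(4, 60, 2));     /* inside, after the wrap  */
            assert(!csq_head_is_valid(4, 60, 30));   /* stale or corrupt head   */
            assert(csq_clean_count(64, 60, 2) == 6); /* 60..63 then 0..1        */
            return 0;
    }
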
 
@@ -321,7 +334,7 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev)
        int ret;
 
        spin_lock_bh(&hdev->hw.cmq.csq.lock);
-       spin_lock_bh(&hdev->hw.cmq.crq.lock);
+       spin_lock(&hdev->hw.cmq.crq.lock);
 
        /* initialize the pointers of async rx queue of mailbox */
        hdev->arq.hdev = hdev;
@@ -335,7 +348,7 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev)
 
        hclgevf_cmd_init_regs(&hdev->hw);
 
-       spin_unlock_bh(&hdev->hw.cmq.crq.lock);
+       spin_unlock(&hdev->hw.cmq.crq.lock);
        spin_unlock_bh(&hdev->hw.cmq.csq.lock);
 
        clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
@@ -344,8 +357,8 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev)
         * reset may happen when lower level reset is being processed.
         */
        if (hclgevf_is_reset_pending(hdev)) {
-               set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
-               return -EBUSY;
+               ret = -EBUSY;
+               goto err_cmd_init;
        }
 
        /* get firmware version */
@@ -353,13 +366,18 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev)
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "failed(%d) to query firmware version\n", ret);
-               return ret;
+               goto err_cmd_init;
        }
        hdev->fw_version = version;
 
        dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);
 
        return 0;
+
+err_cmd_init:
+       set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+
+       return ret;
 }
 
 static void hclgevf_cmd_uninit_regs(struct hclgevf_hw *hw)
index 65bdc689a4cee390ba4839639cb995b3232f65c2..2e277c91a106b50a5a9fa7052b30bf031cdd67b6 100644 (file)
@@ -245,6 +245,27 @@ static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
        return 0;
 }
 
+static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev)
+{
+       struct hnae3_handle *nic = &hdev->nic;
+       u8 resp_msg;
+       int ret;
+
+       ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
+                                  HCLGE_MBX_GET_PORT_BASE_VLAN_STATE,
+                                  NULL, 0, true, &resp_msg, sizeof(u8));
+       if (ret) {
+               dev_err(&hdev->pdev->dev,
+                       "VF request to get port based vlan state failed %d\n",
+                       ret);
+               return ret;
+       }
+
+       nic->port_base_vlan_state = resp_msg;
+
+       return 0;
+}
+
 static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
 {
 #define HCLGEVF_TQPS_RSS_INFO_LEN      6
@@ -307,6 +328,25 @@ static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
        return qid_in_pf;
 }
 
+static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
+{
+       u8 resp_msg;
+       int ret;
+
+       ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_MEDIA_TYPE, 0, NULL, 0,
+                                  true, &resp_msg, sizeof(resp_msg));
+       if (ret) {
+               dev_err(&hdev->pdev->dev,
+                       "VF request to get the PF port media type failed %d\n",
+                       ret);
+               return ret;
+       }
+
+       hdev->hw.mac.media_type = resp_msg;
+
+       return 0;
+}
+
 static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
 {
        struct hclgevf_tqp *tqp;
@@ -1455,6 +1495,8 @@ static int hclgevf_reset(struct hclgevf_dev *hdev)
         */
        hclgevf_cmd_init(hdev);
        dev_err(&hdev->pdev->dev, "failed to reset VF\n");
+       if (hclgevf_is_reset_pending(hdev))
+               hclgevf_reset_task_schedule(hdev);
 
        return ret;
 }
@@ -1564,8 +1606,7 @@ static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
 
 void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
 {
-       if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) &&
-           !test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) {
+       if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state)) {
                set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
                schedule_work(&hdev->rst_service_task);
        }
@@ -1814,6 +1855,11 @@ static int hclgevf_configure(struct hclgevf_dev *hdev)
 {
        int ret;
 
+       /* get current port based vlan state from PF */
+       ret = hclgevf_get_port_base_vlan_filter_state(hdev);
+       if (ret)
+               return ret;
+
        /* get queue configuration from PF */
        ret = hclgevf_get_queue_info(hdev);
        if (ret)
@@ -1824,6 +1870,10 @@ static int hclgevf_configure(struct hclgevf_dev *hdev)
        if (ret)
                return ret;
 
+       ret = hclgevf_get_pf_media_type(hdev);
+       if (ret)
+               return ret;
+
        /* get tc configuration from PF */
        return hclgevf_get_tc_info(hdev);
 }
@@ -2007,9 +2057,15 @@ static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
 static int hclgevf_client_start(struct hnae3_handle *handle)
 {
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+       int ret;
+
+       ret = hclgevf_set_alive(handle, true);
+       if (ret)
+               return ret;
 
        mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);
-       return hclgevf_set_alive(handle, true);
+
+       return 0;
 }
 
 static void hclgevf_client_stop(struct hnae3_handle *handle)
@@ -2051,6 +2107,10 @@ static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
 {
        set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
 
+       if (hdev->keep_alive_timer.function)
+               del_timer_sync(&hdev->keep_alive_timer);
+       if (hdev->keep_alive_task.func)
+               cancel_work_sync(&hdev->keep_alive_task);
        if (hdev->service_timer.function)
                del_timer_sync(&hdev->service_timer);
        if (hdev->service_task.func)
@@ -2756,6 +2816,31 @@ static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
        }
 }
 
+void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
+                                       u8 *port_base_vlan_info, u8 data_size)
+{
+       struct hnae3_handle *nic = &hdev->nic;
+
+       rtnl_lock();
+       hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
+       rtnl_unlock();
+
+       /* send msg to PF and wait for the PF to update the port based vlan info */
+       hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
+                            HCLGE_MBX_PORT_BASE_VLAN_CFG,
+                            port_base_vlan_info, data_size,
+                            false, NULL, 0);
+
+       if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
+               nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
+       else
+               nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
+
+       rtnl_lock();
+       hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
+       rtnl_unlock();
+}
+
 static const struct hnae3_ae_ops hclgevf_ops = {
        .init_ae_dev = hclgevf_init_ae_dev,
        .uninit_ae_dev = hclgevf_uninit_ae_dev,
index c128863ee7d07f5bd7922a286e9d24a6c5fac6ed..49e5bec53d45624d8afae0475d4f1737f02e59b4 100644 (file)
@@ -290,4 +290,6 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
                                 u8 duplex);
 void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev);
 void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev);
+void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
+                                       u8 *port_base_vlan_info, u8 data_size);
 #endif
index 7dc3c9f79169f119e53fb44c8ed929611a217878..bf570840b1f4e65538b9a6bba3b402c978842499 100644 (file)
@@ -198,6 +198,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
                case HCLGE_MBX_LINK_STAT_CHANGE:
                case HCLGE_MBX_ASSERTING_RESET:
                case HCLGE_MBX_LINK_STAT_MODE:
+               case HLCGE_MBX_PUSH_VLAN_INFO:
                        /* set this mbx event as pending. This is required as we
                         * might lose interrupt event when mbx task is busy
                         * handling. This shall be cleared when mbx task just
@@ -243,8 +244,8 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
 void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
 {
        enum hnae3_reset_type reset_type;
-       u16 link_status;
-       u16 *msg_q;
+       u16 link_status, state;
+       u16 *msg_q, *vlan_info;
        u8 duplex;
        u32 speed;
        u32 tail;
@@ -272,7 +273,6 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
                        link_status = le16_to_cpu(msg_q[1]);
                        memcpy(&speed, &msg_q[2], sizeof(speed));
                        duplex = (u8)le16_to_cpu(msg_q[4]);
-                       hdev->hw.mac.media_type = (u8)le16_to_cpu(msg_q[5]);
 
                        /* update upper layer with new link status */
                        hclgevf_update_link_status(hdev, link_status);
@@ -300,6 +300,12 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
                        hclgevf_reset_task_schedule(hdev);
 
                        break;
+               case HLCGE_MBX_PUSH_VLAN_INFO:
+                       state = le16_to_cpu(msg_q[1]);
+                       vlan_info = &msg_q[1];
+                       hclgevf_update_port_base_vlan_info(hdev, state,
+                                                          (u8 *)vlan_info, 8);
+                       break;
                default:
                        dev_err(&hdev->pdev->dev,
                                "fetched unsupported(%d) message from arq\n",
index baf5cc251f3299499f3fc03ee17513aa740110f6..8b8a7d00e8e0c92d23a9ca67683ce98a0417fddc 100644 (file)
@@ -39,7 +39,7 @@ struct hns_mdio_sc_reg {
 };
 
 struct hns_mdio_device {
-       void *vbase;            /* mdio reg base address */
+       u8 __iomem *vbase;              /* mdio reg base address */
        struct regmap *subctrl_vbase;
        struct hns_mdio_sc_reg sc_reg;
 };
@@ -96,21 +96,17 @@ enum mdio_c45_op_seq {
 #define MDIO_SC_CLK_ST         0x531C
 #define MDIO_SC_RESET_ST       0x5A1C
 
-static void mdio_write_reg(void *base, u32 reg, u32 value)
+static void mdio_write_reg(u8 __iomem *base, u32 reg, u32 value)
 {
-       u8 __iomem *reg_addr = (u8 __iomem *)base;
-
-       writel_relaxed(value, reg_addr + reg);
+       writel_relaxed(value, base + reg);
 }
 
 #define MDIO_WRITE_REG(a, reg, value) \
        mdio_write_reg((a)->vbase, (reg), (value))
 
-static u32 mdio_read_reg(void *base, u32 reg)
+static u32 mdio_read_reg(u8 __iomem *base, u32 reg)
 {
-       u8 __iomem *reg_addr = (u8 __iomem *)base;
-
-       return readl_relaxed(reg_addr + reg);
+       return readl_relaxed(base + reg);
 }
 
 #define mdio_set_field(origin, mask, shift, val) \
@@ -121,7 +117,7 @@ static u32 mdio_read_reg(void *base, u32 reg)
 
 #define mdio_get_field(origin, mask, shift) (((origin) >> (shift)) & (mask))
 
-static void mdio_set_reg_field(void *base, u32 reg, u32 mask, u32 shift,
+static void mdio_set_reg_field(u8 __iomem *base, u32 reg, u32 mask, u32 shift,
                               u32 val)
 {
        u32 origin = mdio_read_reg(base, reg);
@@ -133,7 +129,7 @@ static void mdio_set_reg_field(void *base, u32 reg, u32 mask, u32 shift,
 #define MDIO_SET_REG_FIELD(dev, reg, mask, shift, val) \
        mdio_set_reg_field((dev)->vbase, (reg), (mask), (shift), (val))
 
-static u32 mdio_get_reg_field(void *base, u32 reg, u32 mask, u32 shift)
+static u32 mdio_get_reg_field(u8 __iomem *base, u32 reg, u32 mask, u32 shift)
 {
        u32 origin;
 
index e17bf33eba0c5e09f7708dc52566b342f4777dc6..0fbe8046824b2106eed2bbf62e54b477c97de7e7 100644 (file)
@@ -518,7 +518,7 @@ netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
 flush_skbs:
        netdev_txq = netdev_get_tx_queue(netdev, q_id);
-       if ((!skb->xmit_more) || (netif_xmit_stopped(netdev_txq)))
+       if ((!netdev_xmit_more()) || (netif_xmit_stopped(netdev_txq)))
                hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0);
 
        return err;
index 90b62c1412c8f4715eaf1ab3ca14a9128f1f9046..707c8ba120c25d033af9768fbb76da12954abdbd 100644 (file)
@@ -1463,7 +1463,7 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
 
        memset(pr, 0, sizeof(struct ehea_port_res));
 
-       pr->tx_bytes = rx_bytes;
+       pr->tx_bytes = tx_bytes;
        pr->tx_packets = tx_packets;
        pr->rx_bytes = rx_bytes;
        pr->rx_packets = rx_packets;
index 5e4e37132bf257fda81a58715cf781748e767853..77ce17383aba8179ba9f8bffaccc72e7bd7e7468 100644 (file)
@@ -123,8 +123,7 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
                               int nr_of_cqe, u64 eq_handle, u32 cq_token)
 {
        struct ehea_cq *cq;
-       struct h_epa epa;
-       u64 *cq_handle_ref, hret, rpage;
+       u64 hret, rpage;
        u32 counter;
        int ret;
        void *vpage;
@@ -139,8 +138,6 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
 
        cq->adapter = adapter;
 
-       cq_handle_ref = &cq->fw_handle;
-
        hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr,
                                        &cq->fw_handle, &cq->epas);
        if (hret != H_SUCCESS) {
@@ -188,7 +185,6 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
        }
 
        hw_qeit_reset(&cq->hw_queue);
-       epa = cq->epas.kernel;
        ehea_reset_cq_ep(cq);
        ehea_reset_cq_n1(cq);
 
index 25b8e04ef11a78178030e954e51110580c73a92c..5e3cdb0b46d534fbd0fa2dde41c1e88cb14767ab 100644 (file)
@@ -1886,6 +1886,7 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
         */
        adapter->state = VNIC_PROBED;
 
+       reinit_completion(&adapter->init_done);
        rc = init_crq_queue(adapter);
        if (rc) {
                netdev_err(adapter->netdev,
@@ -1968,13 +1969,11 @@ static void __ibmvnic_reset(struct work_struct *work)
 {
        struct ibmvnic_rwi *rwi;
        struct ibmvnic_adapter *adapter;
-       struct net_device *netdev;
        bool we_lock_rtnl = false;
        u32 reset_state;
        int rc = 0;
 
        adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
-       netdev = adapter->netdev;
 
        /* netif_set_real_num_xx_queues needs to take rtnl lock here
         * unless wait_for_reset is set, in which case the rtnl lock
@@ -3759,6 +3758,7 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
 {
        struct device *dev = &adapter->vdev->dev;
        struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
+       netdev_features_t old_hw_features = 0;
        union ibmvnic_crq crq;
        int i;
 
@@ -3834,24 +3834,41 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
        adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
        adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
 
-       adapter->netdev->features = NETIF_F_SG | NETIF_F_GSO;
+       if (adapter->state != VNIC_PROBING) {
+               old_hw_features = adapter->netdev->hw_features;
+               adapter->netdev->hw_features = 0;
+       }
+
+       adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
 
        if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
-               adapter->netdev->features |= NETIF_F_IP_CSUM;
+               adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
 
        if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
-               adapter->netdev->features |= NETIF_F_IPV6_CSUM;
+               adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
 
        if ((adapter->netdev->features &
            (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
-               adapter->netdev->features |= NETIF_F_RXCSUM;
+               adapter->netdev->hw_features |= NETIF_F_RXCSUM;
 
        if (buf->large_tx_ipv4)
-               adapter->netdev->features |= NETIF_F_TSO;
+               adapter->netdev->hw_features |= NETIF_F_TSO;
        if (buf->large_tx_ipv6)
-               adapter->netdev->features |= NETIF_F_TSO6;
+               adapter->netdev->hw_features |= NETIF_F_TSO6;
 
-       adapter->netdev->hw_features |= adapter->netdev->features;
+       if (adapter->state == VNIC_PROBING) {
+               adapter->netdev->features |= adapter->netdev->hw_features;
+       } else if (old_hw_features != adapter->netdev->hw_features) {
+               netdev_features_t tmp = 0;
+
+               /* disable features no longer supported */
+               adapter->netdev->features &= adapter->netdev->hw_features;
+               /* turn on features now supported if previously enabled */
+               tmp = (old_hw_features ^ adapter->netdev->hw_features) &
+                       adapter->netdev->hw_features;
+               adapter->netdev->features |=
+                               tmp & adapter->netdev->wanted_features;
+       }
 
        memset(&crq, 0, sizeof(crq));
        crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
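
The feature reconciliation above works in three steps: mask enabled features down to what the new hardware supports, compute which features just became supported, and re-enable those only if the user had previously wanted them. A worked bitmask example of the same arithmetic, with a plain uint64_t standing in for netdev_features_t and made-up feature bits:

    #include <assert.h>
    #include <stdint.h>

    static uint64_t reconcile(uint64_t features, uint64_t old_hw,
                              uint64_t new_hw, uint64_t wanted)
    {
            /* disable features no longer supported */
            features &= new_hw;
            /* features whose support changed and are supported now */
            uint64_t tmp = (old_hw ^ new_hw) & new_hw;
            /* turn them on only if previously requested by the user */
            features |= tmp & wanted;
            return features;
    }

    int main(void)
    {
            enum { SG = 1 << 0, CSUM = 1 << 1, TSO = 1 << 2 };

            /* TSO support appears, CSUM support disappears; user wanted TSO */
            uint64_t f = reconcile(SG | CSUM, SG | CSUM, SG | TSO, SG | TSO);
            assert(f == (SG | TSO));
            return 0;
    }
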
@@ -4694,7 +4711,7 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
        old_num_rx_queues = adapter->req_rx_queues;
        old_num_tx_queues = adapter->req_tx_queues;
 
-       init_completion(&adapter->init_done);
+       reinit_completion(&adapter->init_done);
        adapter->init_done_rc = 0;
        ibmvnic_send_crq_init(adapter);
        if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
@@ -4749,7 +4766,6 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
 
        adapter->from_passive_init = false;
 
-       init_completion(&adapter->init_done);
        adapter->init_done_rc = 0;
        ibmvnic_send_crq_init(adapter);
        if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
@@ -4828,6 +4844,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
        INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
        INIT_LIST_HEAD(&adapter->rwi_list);
        spin_lock_init(&adapter->rwi_lock);
+       init_completion(&adapter->init_done);
        adapter->resetting = false;
 
        adapter->mac_change_pending = false;
index a7c76732849fd13f7fe827dce6115caa4da3d4b5..6f72ab139fd9283ecf29cd609539804cca9c6095 100644 (file)
@@ -3267,7 +3267,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
                /* Make sure there is space in the ring for the next send. */
                e1000_maybe_stop_tx(netdev, tx_ring, desc_needed);
 
-               if (!skb->xmit_more ||
+               if (!netdev_xmit_more() ||
                    netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
                        writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt);
                        /* we need this if more than one processor can write to
index 745c1242a2d9be848b0548f44c81fae048814369..a8fa4a1628f5d0432797489ecca2d021d02611e8 100644 (file)
@@ -5897,7 +5897,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
                                     DIV_ROUND_UP(PAGE_SIZE,
                                                  adapter->tx_fifo_limit) + 2));
 
-               if (!skb->xmit_more ||
+               if (!netdev_xmit_more() ||
                    netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
                        if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
                                e1000e_update_tdt_wa(tx_ring,
index 5a0419421511fd7a9a3e44a1a7fa2c4e21f26217..2325cee76211364f6d378f41172a8b341b7f5d9a 100644 (file)
@@ -41,6 +41,8 @@ static int __init fm10k_init_module(void)
        /* create driver workqueue */
        fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
                                          fm10k_driver_name);
+       if (!fm10k_workqueue)
+               return -ENOMEM;
 
        fm10k_dbg_init();
 
@@ -1035,7 +1037,7 @@ static void fm10k_tx_map(struct fm10k_ring *tx_ring,
        fm10k_maybe_stop_tx(tx_ring, DESC_NEEDED);
 
        /* notify HW of packet */
-       if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+       if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
                writel(i, tx_ring->tail);
 
                /* we need this if more than one processor can write to our tail
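
The e1000/e1000e/fm10k/hinic hunks in this range all replace skb->xmit_more with netdev_xmit_more(); the pattern both APIs serve is doorbell batching, where the costly MMIO tail write is skipped while the stack promises more packets, unless the queue has just stopped. A schematic sketch under hypothetical helper names, not the drivers' real functions:

    #include <stdbool.h>
    #include <stdio.h>

    /* hypothetical stand-ins for netdev_xmit_more() / netif_xmit_stopped() */
    static bool stack_has_more_packets(void) { return true;  }
    static bool queue_stopped(void)          { return false; }

    static void ring_doorbell(unsigned int tail)
    {
            printf("MMIO tail write: %u\n", tail);   /* the expensive device poke */
    }

    static void maybe_ring_doorbell(unsigned int tail)
    {
            /* batch: touch the device only when the burst ends or the queue stalls */
            if (!stack_has_more_packets() || queue_stopped())
                    ring_doorbell(tail);
    }

    int main(void)
    {
            maybe_ring_doorbell(42);   /* skipped here: more packets are promised */
            return 0;
    }
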
index 50590e8d1fd1389926a549a7ebd7c47a0ac9e28e..2f21b3e89fd03b605f76514297ea1790a9e89b53 100644 (file)
@@ -21,6 +21,7 @@ i40e-objs := i40e_main.o \
        i40e_diag.o     \
        i40e_txrx.o     \
        i40e_ptp.o      \
+       i40e_ddp.o \
        i40e_client.o   \
        i40e_virtchnl_pf.o \
        i40e_xsk.o
index d684998ba2b03b27230916afb7012951ad987a94..c4afb852cb57cef631a941e6c3d18ff3cbe0e98e 100644 (file)
@@ -321,6 +321,29 @@ struct i40e_udp_port_config {
        u8 filter_index;
 };
 
+#define I40_DDP_FLASH_REGION 100
+#define I40E_PROFILE_INFO_SIZE 48
+#define I40E_MAX_PROFILE_NUM 16
+#define I40E_PROFILE_LIST_SIZE \
+       (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4)
+#define I40E_DDP_PROFILE_PATH "intel/i40e/ddp/"
+#define I40E_DDP_PROFILE_NAME_MAX 64
+
+int i40e_ddp_load(struct net_device *netdev, const u8 *data, size_t size,
+                 bool is_add);
+int i40e_ddp_flash(struct net_device *netdev, struct ethtool_flash *flash);
+
+struct i40e_ddp_profile_list {
+       u32 p_count;
+       struct i40e_profile_info p_info[0];
+};
+
+struct i40e_ddp_old_profile_list {
+       struct list_head list;
+       size_t old_ddp_size;
+       u8 old_ddp_buf[0];
+};
+
 /* macros related to FLX_PIT */
 #define I40E_FLEX_SET_FSIZE(fsize) (((fsize) << \
                                    I40E_PRTQF_FLX_PIT_FSIZE_SHIFT) & \
@@ -589,6 +612,8 @@ struct i40e_pf {
        struct sk_buff *ptp_tx_skb;
        unsigned long ptp_tx_start;
        struct hwtstamp_config tstamp_config;
+       struct timespec64 ptp_prev_hw_time;
+       ktime_t ptp_reset_start;
        struct mutex tmreg_lock; /* Used to protect the SYSTIME registers. */
        u32 ptp_adj_mult;
        u32 tx_hwtstamp_timeouts;
@@ -610,6 +635,8 @@ struct i40e_pf {
        u16 override_q_count;
        u16 last_sw_conf_flags;
        u16 last_sw_conf_valid_flags;
+       /* List to keep previous DDP profiles to be rolled back in the future */
+       struct list_head ddp_old_prof;
 };
 
 /**
@@ -790,6 +817,8 @@ struct i40e_vsi {
 
        /* VSI specific handlers */
        irqreturn_t (*irq_handler)(int irq, void *data);
+
+       unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled qps */
 } ____cacheline_internodealigned_in_smp;
 
 struct i40e_netdev_priv {
@@ -1081,6 +1110,8 @@ void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index);
 void i40e_ptp_set_increment(struct i40e_pf *pf);
 int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr);
 int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr);
+void i40e_ptp_save_hw_time(struct i40e_pf *pf);
+void i40e_ptp_restore_hw_time(struct i40e_pf *pf);
 void i40e_ptp_init(struct i40e_pf *pf);
 void i40e_ptp_stop(struct i40e_pf *pf);
 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi);
@@ -1096,20 +1127,6 @@ static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi)
        return !!vsi->xdp_prog;
 }
 
-static inline struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
-{
-       bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
-       int qid = ring->queue_index;
-
-       if (ring_is_xdp(ring))
-               qid -= ring->vsi->alloc_queue_pairs;
-
-       if (!xdp_on)
-               return NULL;
-
-       return xdp_get_umem_from_qid(ring->vsi->netdev, qid);
-}
-
 int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch);
 int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate);
 int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
index 7ab61f6ebb5fa23582e8b5ce1d5e8ee55502b52b..45f6adc8ff2f923b326ccc5b060ff402cb7e2ad1 100644 (file)
@@ -749,7 +749,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
        if (val >= hw->aq.num_asq_entries) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: head overrun at %d\n", val);
-               status = I40E_ERR_QUEUE_EMPTY;
+               status = I40E_ERR_ADMIN_QUEUE_FULL;
                goto asq_send_command_error;
        }
 
index 11506102471c14d8cf36f52488e10073d55c46dd..522058a7d4be82cb298de24992350ca5a56dfdb1 100644 (file)
@@ -11,8 +11,8 @@
  */
 
 #define I40E_FW_API_VERSION_MAJOR      0x0001
-#define I40E_FW_API_VERSION_MINOR_X722 0x0006
-#define I40E_FW_API_VERSION_MINOR_X710 0x0007
+#define I40E_FW_API_VERSION_MINOR_X722 0x0008
+#define I40E_FW_API_VERSION_MINOR_X710 0x0008
 
 #define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \
                                        I40E_FW_API_VERSION_MINOR_X710 : \
index 97a9b1fb47638b47ee97ad7206279e71ba14526a..dd6b3b3ac5c6d59b4436560fee2506ee69f6420e 100644 (file)
@@ -1466,7 +1466,6 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
  **/
 u32 i40e_led_get(struct i40e_hw *hw)
 {
-       u32 current_mode = 0;
        u32 mode = 0;
        int i;
 
@@ -1479,21 +1478,6 @@ u32 i40e_led_get(struct i40e_hw *hw)
                if (!gpio_val)
                        continue;
 
-               /* ignore gpio LED src mode entries related to the activity
-                * LEDs
-                */
-               current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK)
-                               >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT);
-               switch (current_mode) {
-               case I40E_COMBINED_ACTIVITY:
-               case I40E_FILTER_ACTIVITY:
-               case I40E_MAC_ACTIVITY:
-               case I40E_LINK_ACTIVITY:
-                       continue;
-               default:
-                       break;
-               }
-
                mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
                        I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
                break;
@@ -1513,7 +1497,6 @@ u32 i40e_led_get(struct i40e_hw *hw)
  **/
 void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
 {
-       u32 current_mode = 0;
        int i;
 
        if (mode & 0xfffffff0)
@@ -1527,22 +1510,6 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
 
                if (!gpio_val)
                        continue;
-
-               /* ignore gpio LED src mode entries related to the activity
-                * LEDs
-                */
-               current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK)
-                               >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT);
-               switch (current_mode) {
-               case I40E_COMBINED_ACTIVITY:
-               case I40E_FILTER_ACTIVITY:
-               case I40E_MAC_ACTIVITY:
-               case I40E_LINK_ACTIVITY:
-                       continue;
-               default:
-                       break;
-               }
-
                gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
                /* this & is a bit of paranoia, but serves as a range check */
                gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
@@ -5448,6 +5415,163 @@ i40e_find_segment_in_package(u32 segment_type,
        return NULL;
 }
 
+/* Get section table in profile */
+#define I40E_SECTION_TABLE(profile, sec_tbl)                           \
+       do {                                                            \
+               struct i40e_profile_segment *p = (profile);             \
+               u32 count;                                              \
+               u32 *nvm;                                               \
+               count = p->device_table_count;                          \
+               nvm = (u32 *)&p->device_table[count];                   \
+               sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1]; \
+       } while (0)
+
+/* Get section header in profile */
+#define I40E_SECTION_HEADER(profile, offset)                           \
+       (struct i40e_profile_section_header *)((u8 *)(profile) + (offset))
+
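
I40E_SECTION_TABLE above encodes the profile layout: the device table is followed by a u32 word count, that many u32 NVM words, and then the section table. A pointer-arithmetic sketch with trimmed stand-in structures (the field sets here are reduced to what the macro touches and are not the driver's full definitions):

    #include <stdint.h>
    #include <stdio.h>

    struct device_entry { uint32_t vendor_dev_id; };

    /* trimmed stand-ins for the i40e profile structures */
    struct profile_segment {
            uint32_t device_table_count;
            struct device_entry device_table[];  /* then: nvm[0] = n, n words, table */
    };

    struct section_table {
            uint32_t section_count;
            uint32_t section_offset[];
    };

    static struct section_table *get_section_table(struct profile_segment *p)
    {
            uint32_t *nvm = (uint32_t *)&p->device_table[p->device_table_count];

            /* skip the word count itself plus nvm[0] payload words */
            return (struct section_table *)&nvm[nvm[0] + 1];
    }

    int main(void)
    {
            /* one device entry, two NVM words, then a table with zero sections */
            uint32_t buf[] = { 1, 0x8086154c, 2, 0xaaaa, 0xbbbb, 0 };

            printf("sections: %u\n",
                   get_section_table((struct profile_segment *)buf)->section_count);
            return 0;
    }
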
+/**
+ * i40e_find_section_in_profile
+ * @section_type: the section type to search for (e.g., SECTION_TYPE_NOTE)
+ * @profile: pointer to the i40e segment header to be searched
+ *
+ * This function searches the i40e segment for a particular section type. On
+ * success it returns a pointer to the section header, otherwise it will
+ * return NULL.
+ **/
+struct i40e_profile_section_header *
+i40e_find_section_in_profile(u32 section_type,
+                            struct i40e_profile_segment *profile)
+{
+       struct i40e_profile_section_header *sec;
+       struct i40e_section_table *sec_tbl;
+       u32 sec_off;
+       u32 i;
+
+       if (profile->header.type != SEGMENT_TYPE_I40E)
+               return NULL;
+
+       I40E_SECTION_TABLE(profile, sec_tbl);
+
+       for (i = 0; i < sec_tbl->section_count; i++) {
+               sec_off = sec_tbl->section_offset[i];
+               sec = I40E_SECTION_HEADER(profile, sec_off);
+               if (sec->section.type == section_type)
+                       return sec;
+       }
+
+       return NULL;
+}
+
+/**
+ * i40e_ddp_exec_aq_section - Execute generic AQ for DDP
+ * @hw: pointer to the hw struct
+ * @aq: command buffer containing all data to execute AQ
+ **/
+static enum i40e_status_code
+i40e_ddp_exec_aq_section(struct i40e_hw *hw,
+                        struct i40e_profile_aq_section *aq)
+{
+       i40e_status status;
+       struct i40e_aq_desc desc;
+       u8 *msg = NULL;
+       u16 msglen;
+
+       i40e_fill_default_direct_cmd_desc(&desc, aq->opcode);
+       desc.flags |= cpu_to_le16(aq->flags);
+       memcpy(desc.params.raw, aq->param, sizeof(desc.params.raw));
+
+       msglen = aq->datalen;
+       if (msglen) {
+               desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
+                                               I40E_AQ_FLAG_RD));
+               if (msglen > I40E_AQ_LARGE_BUF)
+                       desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+               desc.datalen = cpu_to_le16(msglen);
+               msg = &aq->data[0];
+       }
+
+       status = i40e_asq_send_command(hw, &desc, msg, msglen, NULL);
+
+       if (status) {
+               i40e_debug(hw, I40E_DEBUG_PACKAGE,
+                          "unable to exec DDP AQ opcode %u, error %d\n",
+                          aq->opcode, status);
+               return status;
+       }
+
+       /* copy returned desc to aq_buf */
+       memcpy(aq->param, desc.params.raw, sizeof(desc.params.raw));
+
+       return 0;
+}
+
+/**
+ * i40e_validate_profile
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package to be validated
+ * @track_id: package tracking id
+ * @rollback: flag if the profile is for rollback.
+ *
+ * Validates supported devices and profile's sections.
+ */
+static enum i40e_status_code
+i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+                     u32 track_id, bool rollback)
+{
+       struct i40e_profile_section_header *sec = NULL;
+       i40e_status status = 0;
+       struct i40e_section_table *sec_tbl;
+       u32 vendor_dev_id;
+       u32 dev_cnt;
+       u32 sec_off;
+       u32 i;
+
+       if (track_id == I40E_DDP_TRACKID_INVALID) {
+               i40e_debug(hw, I40E_DEBUG_PACKAGE, "Invalid track_id\n");
+               return I40E_NOT_SUPPORTED;
+       }
+
+       dev_cnt = profile->device_table_count;
+       for (i = 0; i < dev_cnt; i++) {
+               vendor_dev_id = profile->device_table[i].vendor_dev_id;
+               if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL &&
+                   hw->device_id == (vendor_dev_id & 0xFFFF))
+                       break;
+       }
+       if (dev_cnt && i == dev_cnt) {
+               i40e_debug(hw, I40E_DEBUG_PACKAGE,
+                          "Device doesn't support DDP\n");
+               return I40E_ERR_DEVICE_NOT_SUPPORTED;
+       }
+
+       I40E_SECTION_TABLE(profile, sec_tbl);
+
+       /* Validate sections types */
+       for (i = 0; i < sec_tbl->section_count; i++) {
+               sec_off = sec_tbl->section_offset[i];
+               sec = I40E_SECTION_HEADER(profile, sec_off);
+               if (rollback) {
+                       if (sec->section.type == SECTION_TYPE_MMIO ||
+                           sec->section.type == SECTION_TYPE_AQ ||
+                           sec->section.type == SECTION_TYPE_RB_AQ) {
+                               i40e_debug(hw, I40E_DEBUG_PACKAGE,
+                                          "Not a roll-back package\n");
+                               return I40E_NOT_SUPPORTED;
+                       }
+               } else {
+                       if (sec->section.type == SECTION_TYPE_RB_AQ ||
+                           sec->section.type == SECTION_TYPE_RB_MMIO) {
+                               i40e_debug(hw, I40E_DEBUG_PACKAGE,
+                                          "Not an original package\n");
+                               return I40E_NOT_SUPPORTED;
+                       }
+               }
+       }
+
+       return status;
+}
+
 /**
  * i40e_write_profile
  * @hw: pointer to the hardware structure
@@ -5463,47 +5587,99 @@ i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
        i40e_status status = 0;
        struct i40e_section_table *sec_tbl;
        struct i40e_profile_section_header *sec = NULL;
-       u32 dev_cnt;
-       u32 vendor_dev_id;
-       u32 *nvm;
+       struct i40e_profile_aq_section *ddp_aq;
        u32 section_size = 0;
        u32 offset = 0, info = 0;
+       u32 sec_off;
        u32 i;
 
-       dev_cnt = profile->device_table_count;
+       status = i40e_validate_profile(hw, profile, track_id, false);
+       if (status)
+               return status;
 
-       for (i = 0; i < dev_cnt; i++) {
-               vendor_dev_id = profile->device_table[i].vendor_dev_id;
-               if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL)
-                       if (hw->device_id == (vendor_dev_id & 0xFFFF))
+       I40E_SECTION_TABLE(profile, sec_tbl);
+
+       for (i = 0; i < sec_tbl->section_count; i++) {
+               sec_off = sec_tbl->section_offset[i];
+               sec = I40E_SECTION_HEADER(profile, sec_off);
+               /* Process generic admin command */
+               if (sec->section.type == SECTION_TYPE_AQ) {
+                       ddp_aq = (struct i40e_profile_aq_section *)&sec[1];
+                       status = i40e_ddp_exec_aq_section(hw, ddp_aq);
+                       if (status) {
+                               i40e_debug(hw, I40E_DEBUG_PACKAGE,
+                                          "Failed to execute aq: section %d, opcode %u\n",
+                                          i, ddp_aq->opcode);
                                break;
+                       }
+                       sec->section.type = SECTION_TYPE_RB_AQ;
+               }
+
+               /* Skip any non-mmio sections */
+               if (sec->section.type != SECTION_TYPE_MMIO)
+                       continue;
+
+               section_size = sec->section.size +
+                       sizeof(struct i40e_profile_section_header);
+
+               /* Write MMIO section */
+               status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
+                                          track_id, &offset, &info, NULL);
+               if (status) {
+                       i40e_debug(hw, I40E_DEBUG_PACKAGE,
+                                  "Failed to write profile: section %d, offset %d, info %d\n",
+                                  i, offset, info);
+                       break;
+               }
        }
-       if (i == dev_cnt) {
-               i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support DDP");
-               return I40E_ERR_DEVICE_NOT_SUPPORTED;
-       }
+       return status;
+}
+
+/**
+ * i40e_rollback_profile
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package to be removed
+ * @track_id: package tracking id
+ *
+ * Rolls back previously loaded package.
+ */
+enum i40e_status_code
+i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+                     u32 track_id)
+{
+       struct i40e_profile_section_header *sec = NULL;
+       i40e_status status = 0;
+       struct i40e_section_table *sec_tbl;
+       u32 offset = 0, info = 0;
+       u32 section_size = 0;
+       u32 sec_off;
+       int i;
+
+       status = i40e_validate_profile(hw, profile, track_id, true);
+       if (status)
+               return status;
 
-       nvm = (u32 *)&profile->device_table[dev_cnt];
-       sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1];
+       I40E_SECTION_TABLE(profile, sec_tbl);
 
-       for (i = 0; i < sec_tbl->section_count; i++) {
-               sec = (struct i40e_profile_section_header *)((u8 *)profile +
-                                            sec_tbl->section_offset[i]);
+       /* For rollback write sections in reverse */
+       for (i = sec_tbl->section_count - 1; i >= 0; i--) {
+               sec_off = sec_tbl->section_offset[i];
+               sec = I40E_SECTION_HEADER(profile, sec_off);
 
-               /* Skip 'AQ', 'note' and 'name' sections */
-               if (sec->section.type != SECTION_TYPE_MMIO)
+               /* Skip any non-rollback sections */
+               if (sec->section.type != SECTION_TYPE_RB_MMIO)
                        continue;
 
                section_size = sec->section.size +
                        sizeof(struct i40e_profile_section_header);
 
-               /* Write profile */
+               /* Write roll-back MMIO section */
                status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
                                           track_id, &offset, &info, NULL);
                if (status) {
                        i40e_debug(hw, I40E_DEBUG_PACKAGE,
-                                  "Failed to write profile: offset %d, info %d",
-                                  offset, info);
+                                  "Failed to write profile: section %d, offset %d, info %d\n",
+                                  i, offset, info);
                        break;
                }
        }
index 56bff8faf37185fa9c6f45910aad6eb146dc07c5..292eeb3def10581b5ea8ef0bae730b04ba147454 100644 (file)
@@ -863,22 +863,23 @@ i40e_status i40e_get_dcb_config(struct i40e_hw *hw)
 /**
  * i40e_init_dcb
  * @hw: pointer to the hw struct
+ * @enable_mib_change: enable mib change event
  *
  * Update DCB configuration from the Firmware
  **/
-i40e_status i40e_init_dcb(struct i40e_hw *hw)
+i40e_status i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
 {
        i40e_status ret = 0;
        struct i40e_lldp_variables lldp_cfg;
        u8 adminstatus = 0;
 
        if (!hw->func_caps.dcb)
-               return ret;
+               return I40E_NOT_SUPPORTED;
 
        /* Read LLDP NVM area */
        ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
        if (ret)
-               return ret;
+               return I40E_ERR_NOT_READY;
 
        /* Get the LLDP AdminStatus for the current port */
        adminstatus = lldp_cfg.adminstatus >> (hw->port * 4);
@@ -887,7 +888,7 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw)
        /* LLDP agent disabled */
        if (!adminstatus) {
                hw->dcbx_status = I40E_DCBX_STATUS_DISABLED;
-               return ret;
+               return I40E_ERR_NOT_READY;
        }
 
        /* Get DCBX status */
@@ -896,26 +897,19 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw)
                return ret;
 
        /* Check the DCBX Status */
-       switch (hw->dcbx_status) {
-       case I40E_DCBX_STATUS_DONE:
-       case I40E_DCBX_STATUS_IN_PROGRESS:
+       if (hw->dcbx_status == I40E_DCBX_STATUS_DONE ||
+           hw->dcbx_status == I40E_DCBX_STATUS_IN_PROGRESS) {
                /* Get current DCBX configuration */
                ret = i40e_get_dcb_config(hw);
                if (ret)
                        return ret;
-               break;
-       case I40E_DCBX_STATUS_DISABLED:
-               return ret;
-       case I40E_DCBX_STATUS_NOT_STARTED:
-       case I40E_DCBX_STATUS_MULTIPLE_PEERS:
-       default:
-               break;
+       } else if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
+               return I40E_ERR_NOT_READY;
        }
 
        /* Configure the LLDP MIB change event */
-       ret = i40e_aq_cfg_lldp_mib_change_event(hw, true, NULL);
-       if (ret)
-               return ret;
+       if (enable_mib_change)
+               ret = i40e_aq_cfg_lldp_mib_change_event(hw, true, NULL);
 
        return ret;
 }
index 2b748a60a843ce704ddcd34849607c638c1f9b86..ddb48ae7cce468cfc8f63aa3e4aeec0d234db056 100644 (file)
@@ -124,5 +124,5 @@ i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
                                             u8 bridgetype,
                                             struct i40e_dcbx_config *dcbcfg);
 i40e_status i40e_get_dcb_config(struct i40e_hw *hw);
-i40e_status i40e_init_dcb(struct i40e_hw *hw);
+i40e_status i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change);
 #endif /* _I40E_DCB_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ddp.c b/drivers/net/ethernet/intel/i40e/i40e_ddp.c
new file mode 100644 (file)
index 0000000..5e08f10
--- /dev/null
@@ -0,0 +1,481 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#include "i40e.h"
+
+#include <linux/firmware.h>
+
+/**
+ * i40e_ddp_profiles_eq - checks if DDP profiles are equivalent
+ * @a: new profile info
+ * @b: old profile info
+ *
+ * checks if DDP profiles are equivalent.
+ * Returns true if profiles are the same.
+ **/
+static bool i40e_ddp_profiles_eq(struct i40e_profile_info *a,
+                                struct i40e_profile_info *b)
+{
+       return a->track_id == b->track_id &&
+               !memcmp(&a->version, &b->version, sizeof(a->version)) &&
+               !memcmp(&a->name, &b->name, I40E_DDP_NAME_SIZE);
+}
+
+/**
+ * i40e_ddp_does_profile_exist - checks if DDP profile loaded already
+ * @hw: HW data structure
+ * @pinfo: DDP profile information structure
+ *
+ * checks if DDP profile loaded already.
+ * Returns >0 if the profile exists.
+ * Returns  0 if the profile is absent.
+ * Returns <0 if error.
+ **/
+static int i40e_ddp_does_profile_exist(struct i40e_hw *hw,
+                                      struct i40e_profile_info *pinfo)
+{
+       struct i40e_ddp_profile_list *profile_list;
+       u8 buff[I40E_PROFILE_LIST_SIZE];
+       i40e_status status;
+       int i;
+
+       status = i40e_aq_get_ddp_list(hw, buff, I40E_PROFILE_LIST_SIZE, 0,
+                                     NULL);
+       if (status)
+               return -1;
+
+       profile_list = (struct i40e_ddp_profile_list *)buff;
+       for (i = 0; i < profile_list->p_count; i++) {
+               if (i40e_ddp_profiles_eq(pinfo, &profile_list->p_info[i]))
+                       return 1;
+       }
+       return 0;
+}
+
+/**
+ * i40e_ddp_profiles_overlap - checks if DDP profiles overlap.
+ * @new: new profile info
+ * @old: old profile info
+ *
+ * checks if DDP profiles overlap.
+ * Returns true if the profiles overlap.
+ **/
+static bool i40e_ddp_profiles_overlap(struct i40e_profile_info *new,
+                                     struct i40e_profile_info *old)
+{
+       unsigned int group_id_old = (u8)((old->track_id & 0x00FF0000) >> 16);
+       unsigned int group_id_new = (u8)((new->track_id & 0x00FF0000) >> 16);
+
+       /* 0x00 group must be only the first */
+       if (group_id_new == 0)
+               return true;
+       /* 0xFF group is compatible with anything else */
+       if (group_id_new == 0xFF || group_id_old == 0xFF)
+               return false;
+       /* otherwise only profiles from the same group are compatible */
+       return group_id_old != group_id_new;
+}
+
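
The overlap rule above treats bits 23:16 of track_id as a group id: group 0x00 may only ever be the first profile loaded, group 0xFF coexists with anything, and otherwise only same-group profiles are compatible. A small self-checking restatement of those rules:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    static bool profiles_overlap(uint32_t new_track_id, uint32_t old_track_id)
    {
            unsigned int g_old = (old_track_id >> 16) & 0xFF;
            unsigned int g_new = (new_track_id >> 16) & 0xFF;

            if (g_new == 0x00)                   /* group 0 must be loaded alone */
                    return true;
            if (g_new == 0xFF || g_old == 0xFF)  /* wildcard group mixes freely  */
                    return false;
            return g_old != g_new;               /* same group only              */
    }

    int main(void)
    {
            assert(profiles_overlap(0x00000001, 0x00AA0000));   /* new group 0x00  */
            assert(!profiles_overlap(0x00FF0001, 0x00AA0000));  /* 0xFF mixes      */
            assert(profiles_overlap(0x00AB0001, 0x00AA0000));   /* different group */
            assert(!profiles_overlap(0x00AA0001, 0x00AA0002));  /* same group      */
            return 0;
    }
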
+/**
+ * i40e_ddp_does_profile_overlap - checks if DDP profile overlaps an existing one
+ * @hw: HW data structure
+ * @pinfo: DDP profile information structure
+ *
+ * checks if DDP profile overlaps with an existing one.
+ * Returns >0 if the profile overlaps.
+ * Returns  0 if the profile is ok.
+ * Returns <0 if error.
+ **/
+static int i40e_ddp_does_profile_overlap(struct i40e_hw *hw,
+                                        struct i40e_profile_info *pinfo)
+{
+       struct i40e_ddp_profile_list *profile_list;
+       u8 buff[I40E_PROFILE_LIST_SIZE];
+       i40e_status status;
+       int i;
+
+       status = i40e_aq_get_ddp_list(hw, buff, I40E_PROFILE_LIST_SIZE, 0,
+                                     NULL);
+       if (status)
+               return -EIO;
+
+       profile_list = (struct i40e_ddp_profile_list *)buff;
+       for (i = 0; i < profile_list->p_count; i++) {
+               if (i40e_ddp_profiles_overlap(pinfo,
+                                             &profile_list->p_info[i]))
+                       return 1;
+       }
+       return 0;
+}
+
+/**
+ * i40e_add_pinfo
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package
+ * @profile_info_sec: buffer for information section
+ * @track_id: package tracking id
+ *
+ * Register a profile to the list of loaded profiles.
+ */
+static enum i40e_status_code
+i40e_add_pinfo(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+              u8 *profile_info_sec, u32 track_id)
+{
+       struct i40e_profile_section_header *sec;
+       struct i40e_profile_info *pinfo;
+       i40e_status status;
+       u32 offset = 0, info = 0;
+
+       sec = (struct i40e_profile_section_header *)profile_info_sec;
+       sec->tbl_size = 1;
+       sec->data_end = sizeof(struct i40e_profile_section_header) +
+                       sizeof(struct i40e_profile_info);
+       sec->section.type = SECTION_TYPE_INFO;
+       sec->section.offset = sizeof(struct i40e_profile_section_header);
+       sec->section.size = sizeof(struct i40e_profile_info);
+       pinfo = (struct i40e_profile_info *)(profile_info_sec +
+                                            sec->section.offset);
+       pinfo->track_id = track_id;
+       pinfo->version = profile->version;
+       pinfo->op = I40E_DDP_ADD_TRACKID;
+
+       /* Clear reserved field */
+       memset(pinfo->reserved, 0, sizeof(pinfo->reserved));
+       memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
+
+       status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
+                                  track_id, &offset, &info, NULL);
+       return status;
+}
+
+/**
+ * i40e_del_pinfo - delete DDP profile info from NIC
+ * @hw: HW data structure
+ * @profile: DDP profile segment to be deleted
+ * @profile_info_sec: DDP profile section header
+ * @track_id: track ID of the profile for deletion
+ *
+ * Removes DDP profile from the NIC.
+ **/
+static enum i40e_status_code
+i40e_del_pinfo(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+              u8 *profile_info_sec, u32 track_id)
+{
+       struct i40e_profile_section_header *sec;
+       struct i40e_profile_info *pinfo;
+       i40e_status status;
+       u32 offset = 0, info = 0;
+
+       sec = (struct i40e_profile_section_header *)profile_info_sec;
+       sec->tbl_size = 1;
+       sec->data_end = sizeof(struct i40e_profile_section_header) +
+                       sizeof(struct i40e_profile_info);
+       sec->section.type = SECTION_TYPE_INFO;
+       sec->section.offset = sizeof(struct i40e_profile_section_header);
+       sec->section.size = sizeof(struct i40e_profile_info);
+       pinfo = (struct i40e_profile_info *)(profile_info_sec +
+                                            sec->section.offset);
+       pinfo->track_id = track_id;
+       pinfo->version = profile->version;
+       pinfo->op = I40E_DDP_REMOVE_TRACKID;
+
+       /* Clear reserved field */
+       memset(pinfo->reserved, 0, sizeof(pinfo->reserved));
+       memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
+
+       status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
+                                  track_id, &offset, &info, NULL);
+       return status;
+}
+
+/**
+ * i40e_ddp_is_pkg_hdr_valid - performs basic pkg header integrity checks
+ * @netdev: net device structure (for logging purposes)
+ * @pkg_hdr: pointer to package header
+ * @size_huge: size of the whole DDP profile package as a size_t
+ *
+ * Checks the correctness of the pkg header: version, size too big/small, and
+ * the alignment and boundaries of all segment offsets. This lets the driver
+ * reject a non-DDP profile file loaded by administrator mistake.
+ **/
+static bool i40e_ddp_is_pkg_hdr_valid(struct net_device *netdev,
+                                     struct i40e_package_header *pkg_hdr,
+                                     size_t size_huge)
+{
+       u32 size = 0xFFFFFFFFU & size_huge;
+       u32 pkg_hdr_size;
+       u32 segment;
+
+       if (!pkg_hdr)
+               return false;
+
+       if (pkg_hdr->version.major > 0) {
+               struct i40e_ddp_version ver = pkg_hdr->version;
+
+               netdev_err(netdev, "Unsupported DDP profile version %u.%u.%u.%u",
+                          ver.major, ver.minor, ver.update, ver.draft);
+               return false;
+       }
+       if (size_huge > size) {
+               netdev_err(netdev, "Invalid DDP profile - size is bigger than 4G");
+               return false;
+       }
+       if (size < (sizeof(struct i40e_package_header) +
+               sizeof(struct i40e_metadata_segment) + sizeof(u32) * 2)) {
+               netdev_err(netdev, "Invalid DDP profile - size is too small.");
+               return false;
+       }
+
+       pkg_hdr_size = sizeof(u32) * (pkg_hdr->segment_count + 2U);
+       if (size < pkg_hdr_size) {
+               netdev_err(netdev, "Invalid DDP profile - too many segments");
+               return false;
+       }
+       for (segment = 0; segment < pkg_hdr->segment_count; ++segment) {
+               u32 offset = pkg_hdr->segment_offset[segment];
+
+               if (0xFU & offset) {
+                       netdev_err(netdev,
+                                  "Invalid DDP profile %u segment alignment",
+                                  segment);
+                       return false;
+               }
+               if (pkg_hdr_size > offset || offset >= size) {
+                       netdev_err(netdev,
+                                  "Invalid DDP profile %u segment offset",
+                                  segment);
+                       return false;
+               }
+       }
+
+       return true;
+}
+
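
The validator above deliberately truncates the size_t to 32 bits and compares it back against the original: if anything was lost, the package exceeds 4 GiB and is rejected before later 32-bit arithmetic can wrap. The same check in isolation:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    static int fits_in_4g(size_t size_huge)
    {
            uint32_t size = 0xFFFFFFFFU & size_huge;

            return !(size_huge > size);   /* truncation lost nothing */
    }

    int main(void)
    {
            assert(fits_in_4g(1024));
            if (sizeof(size_t) > 4)                          /* meaningful on 64-bit */
                    assert(!fits_in_4g((size_t)(1ULL << 33))); /* 8 GiB is rejected  */
            return 0;
    }
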
+/**
+ * i40e_ddp_load - performs DDP loading
+ * @netdev: net device structure
+ * @data: buffer containing recipe file
+ * @size: size of the buffer
+ * @is_add: true when loading profile, false when rolling back the previous one
+ *
+ * Checks correctness and loads DDP profile to the NIC. The function is
+ * also used for rolling back previously loaded profile.
+ **/
+int i40e_ddp_load(struct net_device *netdev, const u8 *data, size_t size,
+                 bool is_add)
+{
+       u8 profile_info_sec[sizeof(struct i40e_profile_section_header) +
+                           sizeof(struct i40e_profile_info)];
+       struct i40e_metadata_segment *metadata_hdr;
+       struct i40e_profile_segment *profile_hdr;
+       struct i40e_profile_info pinfo;
+       struct i40e_package_header *pkg_hdr;
+       i40e_status status;
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
+       u32 track_id;
+       int istatus;
+
+       pkg_hdr = (struct i40e_package_header *)data;
+       if (!i40e_ddp_is_pkg_hdr_valid(netdev, pkg_hdr, size))
+               return -EINVAL;
+
+       if (size < (sizeof(struct i40e_package_header) +
+                   sizeof(struct i40e_metadata_segment) + sizeof(u32) * 2)) {
+               netdev_err(netdev, "Invalid DDP recipe size.");
+               return -EINVAL;
+       }
+
+       /* Find beginning of segment data in buffer */
+       metadata_hdr = (struct i40e_metadata_segment *)
+               i40e_find_segment_in_package(SEGMENT_TYPE_METADATA, pkg_hdr);
+       if (!metadata_hdr) {
+               netdev_err(netdev, "Failed to find metadata segment in DDP recipe.");
+               return -EINVAL;
+       }
+
+       track_id = metadata_hdr->track_id;
+       profile_hdr = (struct i40e_profile_segment *)
+               i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
+       if (!profile_hdr) {
+               netdev_err(netdev, "Failed to find profile segment in DDP recipe.");
+               return -EINVAL;
+       }
+
+       pinfo.track_id = track_id;
+       pinfo.version = profile_hdr->version;
+       if (is_add)
+               pinfo.op = I40E_DDP_ADD_TRACKID;
+       else
+               pinfo.op = I40E_DDP_REMOVE_TRACKID;
+
+       memcpy(pinfo.name, profile_hdr->name, I40E_DDP_NAME_SIZE);
+
+       /* Check if profile data already exists */
+       istatus = i40e_ddp_does_profile_exist(&pf->hw, &pinfo);
+       if (istatus < 0) {
+               netdev_err(netdev, "Failed to fetch loaded profiles.");
+               return istatus;
+       }
+       if (is_add) {
+               if (istatus > 0) {
+                       netdev_err(netdev, "DDP profile already loaded.");
+                       return -EINVAL;
+               }
+               istatus = i40e_ddp_does_profile_overlap(&pf->hw, &pinfo);
+               if (istatus < 0) {
+                       netdev_err(netdev, "Failed to fetch loaded profiles.");
+                       return istatus;
+               }
+               if (istatus > 0) {
+                       netdev_err(netdev, "DDP profile overlaps with existing one.");
+                       return -EINVAL;
+               }
+       } else {
+               if (istatus == 0) {
+                       netdev_err(netdev,
+                                  "DDP profile for deletion does not exist.");
+                       return -EINVAL;
+               }
+       }
+
+       /* Load profile data */
+       if (is_add) {
+               status = i40e_write_profile(&pf->hw, profile_hdr, track_id);
+               if (status) {
+                       if (status == I40E_ERR_DEVICE_NOT_SUPPORTED) {
+                               netdev_err(netdev,
+                                          "Profile is not supported by the device.");
+                               return -EPERM;
+                       }
+                       netdev_err(netdev, "Failed to write DDP profile.");
+                       return -EIO;
+               }
+       } else {
+               status = i40e_rollback_profile(&pf->hw, profile_hdr, track_id);
+               if (status) {
+                       netdev_err(netdev, "Failed to remove DDP profile.");
+                       return -EIO;
+               }
+       }
+
+       /* Add/remove profile to/from profile list in FW */
+       if (is_add) {
+               status = i40e_add_pinfo(&pf->hw, profile_hdr, profile_info_sec,
+                                       track_id);
+               if (status) {
+                       netdev_err(netdev, "Failed to add DDP profile info.");
+                       return -EIO;
+               }
+       } else {
+               status = i40e_del_pinfo(&pf->hw, profile_hdr, profile_info_sec,
+                                       track_id);
+               if (status) {
+                       netdev_err(netdev, "Failed to restore DDP profile info.");
+                       return -EIO;
+               }
+       }
+
+       return 0;
+}
+
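Since is_add selects between writing and rolling back a profile, the same entry point serves both directions. A hedged usage sketch, with fw standing in for a firmware blob already obtained by the caller:

	int err;

	err = i40e_ddp_load(netdev, fw->data, fw->size, true);		/* add profile */
	if (!err)
		err = i40e_ddp_load(netdev, fw->data, fw->size, false);	/* roll it back */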
+/**
+ * i40e_ddp_restore - restore previously loaded profile and remove from list
+ * @pf: PF data struct
+ *
+ * Restores previously loaded profile stored on the list in driver memory.
+ * After rolling back removes entry from the list.
+ **/
+static int i40e_ddp_restore(struct i40e_pf *pf)
+{
+       struct i40e_ddp_old_profile_list *entry;
+       struct net_device *netdev = pf->vsi[pf->lan_vsi]->netdev;
+       int status = 0;
+
+       if (!list_empty(&pf->ddp_old_prof)) {
+               entry = list_first_entry(&pf->ddp_old_prof,
+                                        struct i40e_ddp_old_profile_list,
+                                        list);
+               status = i40e_ddp_load(netdev, entry->old_ddp_buf,
+                                      entry->old_ddp_size, false);
+               list_del(&entry->list);
+               kfree(entry);
+       }
+       return status;
+}
+
+/**
+ * i40e_ddp_flash - callback function for ethtool flash feature
+ * @netdev: net device structure
+ * @flash: kernel flash structure
+ *
+ * Ethtool callback function used for loading and unloading DDP profiles.
+ **/
+int i40e_ddp_flash(struct net_device *netdev, struct ethtool_flash *flash)
+{
+       const struct firmware *ddp_config;
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
+       int status = 0;
+
+       /* Check for valid region first */
+       if (flash->region != I40_DDP_FLASH_REGION) {
+               netdev_err(netdev, "Requested firmware region is not recognized by this driver.");
+               return -EINVAL;
+       }
+       if (pf->hw.bus.func != 0) {
+               netdev_err(netdev, "DDP operations are allowed only on the Phy0 NIC interface");
+               return -EINVAL;
+       }
+
+       /* If the user supplied "-" instead of a file name, roll back the
+        * previously stored profile.
+        */
+       if (strncmp(flash->data, "-", 2) != 0) {
+               struct i40e_ddp_old_profile_list *list_entry;
+               char profile_name[sizeof(I40E_DDP_PROFILE_PATH)
+                                 + I40E_DDP_PROFILE_NAME_MAX];
+
+               profile_name[sizeof(profile_name) - 1] = 0;
+               strncpy(profile_name, I40E_DDP_PROFILE_PATH,
+                       sizeof(profile_name) - 1);
+               strncat(profile_name, flash->data, I40E_DDP_PROFILE_NAME_MAX);
+               /* Load DDP recipe. */
+               status = request_firmware(&ddp_config, profile_name,
+                                         &netdev->dev);
+               if (status) {
+                       netdev_err(netdev, "DDP recipe file request failed.");
+                       return status;
+               }
+
+               status = i40e_ddp_load(netdev, ddp_config->data,
+                                      ddp_config->size, true);
+
+               if (!status) {
+                       list_entry =
+                         kzalloc(sizeof(struct i40e_ddp_old_profile_list) +
+                                 ddp_config->size, GFP_KERNEL);
+                       if (!list_entry) {
+                               netdev_info(netdev, "Failed to allocate memory for previous DDP profile data.");
+                               netdev_info(netdev, "New profile loaded but roll-back will be impossible.");
+                       } else {
+                               memcpy(list_entry->old_ddp_buf,
+                                      ddp_config->data, ddp_config->size);
+                               list_entry->old_ddp_size = ddp_config->size;
+                               list_add(&list_entry->list, &pf->ddp_old_prof);
+                       }
+               }
+
+               release_firmware(ddp_config);
+       } else {
+               if (!list_empty(&pf->ddp_old_prof)) {
+                       status = i40e_ddp_restore(pf);
+               } else {
+                       netdev_warn(netdev, "There is no DDP profile to restore.");
+                       status = -ENOENT;
+               }
+       }
+       return status;
+}
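From userspace this hook is reached through the standard ethtool flash request. A hedged sketch follows: the socket is any AF_INET datagram socket, the interface and file names are placeholders, and region must equal the driver's I40_DDP_FLASH_REGION (whose value is not part of this hunk). The file name is resolved by request_firmware() under the driver's DDP profile path, and passing "-" rolls back the last loaded profile:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	/* Roughly what "ethtool -f <ifname> <file> <region>" does. */
	static int ddp_flash(int sock, const char *ifname, const char *file,
			     unsigned int region)
	{
		struct ethtool_flash flash = {
			.cmd	= ETHTOOL_FLASHDEV,
			.region	= region,
		};
		struct ifreq ifr = { 0 };

		strncpy(flash.data, file, ETHTOOL_FLASH_MAX_FILENAME - 1);
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&flash;
		return ioctl(sock, SIOCETHTOOL, &ifr);
	}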
index 4c885801fa2699432d0eaaaca8c4487081ee36b0..9eaea1bee4a122bd935b35054600565c15049875 100644 (file)
@@ -535,9 +535,12 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf,
                        ethtool_link_ksettings_add_link_mode(ks, advertising,
                                                             1000baseT_Full);
        }
-       if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4)
+       if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4) {
                ethtool_link_ksettings_add_link_mode(ks, supported,
                                                     40000baseSR4_Full);
+               ethtool_link_ksettings_add_link_mode(ks, advertising,
+                                                    40000baseSR4_Full);
+       }
        if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_LR4)
                ethtool_link_ksettings_add_link_mode(ks, supported,
                                                     40000baseLR4_Full);
@@ -724,6 +727,8 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
        case I40E_PHY_TYPE_40GBASE_SR4:
                ethtool_link_ksettings_add_link_mode(ks, supported,
                                                     40000baseSR4_Full);
+               ethtool_link_ksettings_add_link_mode(ks, advertising,
+                                                    40000baseSR4_Full);
                break;
        case I40E_PHY_TYPE_40GBASE_LR4:
                ethtool_link_ksettings_add_link_mode(ks, supported,
@@ -2573,8 +2578,7 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
                return -EOPNOTSUPP;
 
        /* only magic packet is supported */
-       if (wol->wolopts && (wol->wolopts != WAKE_MAGIC)
-                         | (wol->wolopts != WAKE_FILTER))
+       if (wol->wolopts & ~WAKE_MAGIC)
                return -EOPNOTSUPP;
 
        /* is this a new value? */
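The replaced WoL test was a precedence bug, not a cleanup: != binds tighter than |, and | tighter than &&, so wolopts == WAKE_MAGIC evaluated as wolopts && (0 | 1) and even the one supported mode was rejected. A worked sketch of the corrected mask test:

	/* Accept only the magic-packet bit, or no wake bits at all. */
	static bool wol_opts_ok(u32 wolopts)
	{
		return !(wolopts & ~WAKE_MAGIC);
	}

	/* wol_opts_ok(0)                       -> true  (WoL disabled)
	 * wol_opts_ok(WAKE_MAGIC)              -> true
	 * wol_opts_ok(WAKE_MAGIC | WAKE_UCAST) -> false (unsupported bit)
	 */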
@@ -5172,6 +5176,7 @@ static const struct ethtool_ops i40e_ethtool_ops = {
        .set_link_ksettings     = i40e_set_link_ksettings,
        .get_fecparam = i40e_get_fec_param,
        .set_fecparam = i40e_set_fec_param,
+       .flash_device = i40e_ddp_flash,
 };
 
 void i40e_set_ethtool_ops(struct net_device *netdev)
index da62218eb70ad3f4c95111c3c67f3a4dd541aa50..65c2b9d2652b215de260f1e2d5502bebe73ea957 100644 (file)
@@ -2107,11 +2107,22 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
        fcnt = i40e_update_filter_state(num_add, list, add_head);
 
        if (fcnt != num_add) {
-               set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
-               dev_warn(&vsi->back->pdev->dev,
-                        "Error %s adding RX filters on %s, promiscuous mode forced on\n",
-                        i40e_aq_str(hw, aq_err),
-                        vsi_name);
+               if (vsi->type == I40E_VSI_MAIN) {
+                       set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
+                       dev_warn(&vsi->back->pdev->dev,
+                                "Error %s adding RX filters on %s, promiscuous mode forced on\n",
+                                i40e_aq_str(hw, aq_err), vsi_name);
+               } else if (vsi->type == I40E_VSI_SRIOV ||
+                          vsi->type == I40E_VSI_VMDQ1 ||
+                          vsi->type == I40E_VSI_VMDQ2) {
+                       dev_warn(&vsi->back->pdev->dev,
+                                "Error %s adding RX filters on %s, please set promiscuous on manually for %s\n",
+                                i40e_aq_str(hw, aq_err), vsi_name, vsi_name);
+               } else {
+                       dev_warn(&vsi->back->pdev->dev,
+                                "Error %s adding RX filters on %s, incorrect VSI type: %i.\n",
+                                i40e_aq_str(hw, aq_err), vsi_name, vsi->type);
+               }
        }
 }
 
@@ -2654,6 +2665,10 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
        struct i40e_vsi_context ctxt;
        i40e_status ret;
 
+       /* Don't modify stripping options if a port VLAN is active */
+       if (vsi->info.pvid)
+               return;
+
        if ((vsi->info.valid_sections &
             cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
            ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
@@ -2684,6 +2699,10 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
        struct i40e_vsi_context ctxt;
        i40e_status ret;
 
+       /* Don't modify stripping options if a port VLAN is active */
+       if (vsi->info.pvid)
+               return;
+
        if ((vsi->info.valid_sections &
             cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
            ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
@@ -3063,6 +3082,26 @@ static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
                            ring->queue_index);
 }
 
+/**
+ * i40e_xsk_umem - Retrieve the ring's UMEM if XDP and zero-copy (ZC) are enabled
+ * @ring: The Tx or Rx ring
+ *
+ * Returns the UMEM or NULL.
+ **/
+static struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
+{
+       bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
+       int qid = ring->queue_index;
+
+       if (ring_is_xdp(ring))
+               qid -= ring->vsi->alloc_queue_pairs;
+
+       if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps))
+               return NULL;
+
+       return xdp_get_umem_from_qid(ring->vsi->netdev, qid);
+}
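The qid adjustment reflects the ring layout visible in the hunk itself: XDP Tx rings are allocated after the regular queue pairs and share a qid with their pair. An illustrative mapping helper (not driver code), with N = vsi->alloc_queue_pairs:

	/* queue_index 0..N-1  : regular Tx/Rx pairs, qid == queue_index
	 * queue_index N..2N-1 : XDP Tx rings,        qid == queue_index - N
	 */
	static int ring_to_qid(int queue_index, int alloc_queue_pairs, bool is_xdp)
	{
		return is_xdp ? queue_index - alloc_queue_pairs : queue_index;
	}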
+
 /**
  * i40e_configure_tx_ring - Configure a transmit ring context and rest
  * @ring: The Tx ring to configure
@@ -6383,7 +6422,7 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
                goto out;
 
        /* Get the initial DCB configuration */
-       err = i40e_init_dcb(hw);
+       err = i40e_init_dcb(hw, true);
        if (!err) {
                /* Device/Function is not DCBX capable */
                if ((!hw->func_caps.dcb) ||
@@ -6826,10 +6865,12 @@ static int i40e_setup_tc(struct net_device *netdev, void *type_data)
        struct i40e_pf *pf = vsi->back;
        u8 enabled_tc = 0, num_tc, hw;
        bool need_reset = false;
+       int old_queue_pairs;
        int ret = -EINVAL;
        u16 mode;
        int i;
 
+       old_queue_pairs = vsi->num_queue_pairs;
        num_tc = mqprio_qopt->qopt.num_tc;
        hw = mqprio_qopt->qopt.hw;
        mode = mqprio_qopt->mode;
@@ -6930,6 +6971,7 @@ static int i40e_setup_tc(struct net_device *netdev, void *type_data)
                }
                ret = i40e_configure_queue_channels(vsi);
                if (ret) {
+                       vsi->num_queue_pairs = old_queue_pairs;
                        netdev_info(netdev,
                                    "Failed configuring queue channels\n");
                        need_reset = true;
@@ -9270,6 +9312,11 @@ static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired)
                        dev_warn(&pf->pdev->dev,
                                 "shutdown_lan_hmc failed: %d\n", ret);
        }
+
+       /* Save the current PTP time so that we can restore the time after the
+        * reset completes.
+        */
+       i40e_ptp_save_hw_time(pf);
 }
 
 /**
@@ -10064,6 +10111,12 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
        hash_init(vsi->mac_filter_hash);
        vsi->irqs_ready = false;
 
+       if (type == I40E_VSI_MAIN) {
+               vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL);
+               if (!vsi->af_xdp_zc_qps)
+                       goto err_rings;
+       }
+
        ret = i40e_set_num_rings_in_vsi(vsi);
        if (ret)
                goto err_rings;
@@ -10082,6 +10135,7 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
        goto unlock_pf;
 
 err_rings:
+       bitmap_free(vsi->af_xdp_zc_qps);
        pf->next_vsi = i - 1;
        kfree(vsi);
 unlock_pf:
@@ -10162,6 +10216,7 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi)
        i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
        i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
 
+       bitmap_free(vsi->af_xdp_zc_qps);
        i40e_vsi_free_arrays(vsi, true);
        i40e_clear_rss_config_user(vsi);
 
@@ -13956,6 +14011,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        INIT_LIST_HEAD(&pf->l3_flex_pit_list);
        INIT_LIST_HEAD(&pf->l4_flex_pit_list);
+       INIT_LIST_HEAD(&pf->ddp_old_prof);
 
        /* set up the locks for the AQ, do this only once in probe
         * and destroy them only once in remove
@@ -14014,7 +14070,11 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (err) {
                if (err == I40E_ERR_FIRMWARE_API_VERSION)
                        dev_info(&pdev->dev,
-                                "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
+                                "The driver for the device stopped because the NVM image v%u.%u is newer than expected v%u.%u. You must install the most recent version of the network driver.\n",
+                                hw->aq.api_maj_ver,
+                                hw->aq.api_min_ver,
+                                I40E_FW_API_VERSION_MAJOR,
+                                I40E_FW_MINOR_VERSION(hw));
                else
                        dev_info(&pdev->dev,
                                 "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");
@@ -14032,10 +14092,18 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
            hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
                dev_info(&pdev->dev,
-                        "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
+                        "The driver for the device detected a newer version of the NVM image v%u.%u than expected v%u.%u. Please install the most recent version of the network driver.\n",
+                        hw->aq.api_maj_ver,
+                        hw->aq.api_min_ver,
+                        I40E_FW_API_VERSION_MAJOR,
+                        I40E_FW_MINOR_VERSION(hw));
        else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4)
                dev_info(&pdev->dev,
-                        "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
+                        "The driver for the device detected an older version of the NVM image v%u.%u than expected v%u.%u. Please update the NVM image.\n",
+                        hw->aq.api_maj_ver,
+                        hw->aq.api_min_ver,
+                        I40E_FW_API_VERSION_MAJOR,
+                        I40E_FW_MINOR_VERSION(hw));
 
        i40e_verify_eeprom(pf);
 
index e08d754824b13e97f7e7db73d762668aeff87931..663c8bf4d3d81ea4f42f9ba11c71f6a2849b8d9e 100644 (file)
@@ -429,10 +429,16 @@ i40e_status i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
 struct i40e_generic_seg_header *
 i40e_find_segment_in_package(u32 segment_type,
                             struct i40e_package_header *pkg_header);
+struct i40e_profile_section_header *
+i40e_find_section_in_profile(u32 section_type,
+                            struct i40e_profile_segment *profile);
 enum i40e_status_code
 i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
                   u32 track_id);
 enum i40e_status_code
+i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
+                     u32 track_id);
+enum i40e_status_code
 i40e_add_pinfo_to_list(struct i40e_hw *hw,
                       struct i40e_profile_segment *profile,
                       u8 *profile_info_sec, u32 track_id);
index 5fb4353c742b9038d3ac7f867163d35cd3c3142d..439c35f0c581cc66b8f69fb706ac0f4a82e8a8c3 100644 (file)
@@ -146,12 +146,13 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
 static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
 {
        struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
-       struct timespec64 now;
+       struct timespec64 now, then;
 
+       then = ns_to_timespec64(delta);
        mutex_lock(&pf->tmreg_lock);
 
        i40e_ptp_read(pf, &now, NULL);
-       timespec64_add_ns(&now, delta);
+       now = timespec64_add(now, then);
        i40e_ptp_write(pf, (const struct timespec64 *)&now);
 
        mutex_unlock(&pf->tmreg_lock);
@@ -724,9 +725,56 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf)
        pf->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
        pf->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
 
+       /* Set the previous "reset" time to the current kernel clock time */
+       pf->ptp_prev_hw_time = ktime_to_timespec64(ktime_get_real());
+       pf->ptp_reset_start = ktime_get();
+
        return 0;
 }
 
+/**
+ * i40e_ptp_save_hw_time - Save the current PTP time as ptp_prev_hw_time
+ * @pf: Board private structure
+ *
+ * Read the current PTP time and save it into pf->ptp_prev_hw_time. This should
+ * be called at the end of preparing to reset, just before hardware reset
+ * occurs, in order to preserve the PTP time as close as possible across
+ * resets.
+ */
+void i40e_ptp_save_hw_time(struct i40e_pf *pf)
+{
+       /* don't try to access the PTP clock if it's not enabled */
+       if (!(pf->flags & I40E_FLAG_PTP))
+               return;
+
+       i40e_ptp_gettimex(&pf->ptp_caps, &pf->ptp_prev_hw_time, NULL);
+       /* Get a monotonic starting time for this reset */
+       pf->ptp_reset_start = ktime_get();
+}
+
+/**
+ * i40e_ptp_restore_hw_time - Restore the ptp_prev_hw_time + delta to PTP regs
+ * @pf: Board private structure
+ *
+ * Restore the PTP hardware clock registers. We previously cached the PTP
+ * hardware time as pf->ptp_prev_hw_time. To be as accurate as possible,
+ * update this value based on the time delta since the time was saved, using
+ * CLOCK_MONOTONIC (via ktime_get()) to calculate the time difference.
+ *
+ * This ensures that the hardware clock is restored to nearly what it should
+ * have been if a reset had not occurred.
+ */
+void i40e_ptp_restore_hw_time(struct i40e_pf *pf)
+{
+       ktime_t delta = ktime_sub(ktime_get(), pf->ptp_reset_start);
+
+       /* Update the previous HW time with the ktime delta */
+       timespec64_add_ns(&pf->ptp_prev_hw_time, ktime_to_ns(delta));
+
+       /* Restore the hardware clock registers */
+       i40e_ptp_settime(&pf->ptp_caps, &pf->ptp_prev_hw_time);
+}
+
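Together, save and restore implement one line of clock arithmetic: the time written back to hardware is the saved PTP time plus the CLOCK_MONOTONIC time spent in reset. A condensed sketch of that arithmetic, mirroring the fields above:

	static struct timespec64 ptp_time_after_reset(struct timespec64 saved,
						      ktime_t reset_start)
	{
		ktime_t in_reset = ktime_sub(ktime_get(), reset_start);

		/* advance the saved time by however long the reset took */
		timespec64_add_ns(&saved, ktime_to_ns(in_reset));
		return saved;
	}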
 /**
  * i40e_ptp_init - Initialize the 1588 support after device probe or reset
  * @pf: Board private structure
@@ -734,6 +782,11 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf)
  * This function sets device up for 1588 support. The first time it is run, it
  * will create a PHC clock device. It does not create a clock device if one
  * already exists. It also reconfigures the device after a reset.
+ *
+ * The first time a clock is created, i40e_ptp_create_clock will set
+ * pf->ptp_prev_hw_time to the current system time. During resets, it is
+ * expected that this timespec will be set to the last known PTP clock time,
+ * in order to preserve the clock time as close as possible across a reset.
  **/
 void i40e_ptp_init(struct i40e_pf *pf)
 {
@@ -765,7 +818,6 @@ void i40e_ptp_init(struct i40e_pf *pf)
                dev_err(&pf->pdev->dev, "%s: ptp_clock_register failed\n",
                        __func__);
        } else if (pf->ptp_clock) {
-               struct timespec64 ts;
                u32 regval;
 
                if (pf->hw.debug_mask & I40E_DEBUG_LAN)
@@ -786,9 +838,8 @@ void i40e_ptp_init(struct i40e_pf *pf)
                /* reset timestamping mode */
                i40e_ptp_set_timestamp_mode(pf, &pf->tstamp_config);
 
-               /* Set the clock value. */
-               ts = ktime_to_timespec64(ktime_get_real());
-               i40e_ptp_settime(&pf->ptp_caps, &ts);
+               /* Restore the clock time based on last known value */
+               i40e_ptp_restore_hw_time(pf);
        }
 }
 
index 6c97667d20eff136cde56c0447c54892c464fe31..1a95223c9f99af3738641b85c46f2b09ac88f384 100644 (file)
@@ -3469,7 +3469,7 @@ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
        first->next_to_watch = tx_desc;
 
        /* notify HW of packet */
-       if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+       if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
                writel(i, tx_ring->tail);
 
                /* we need this if more than one processor can write to our tail
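This hunk and the matching ones below (iavf, ice, igb, igc, ixgbe, mvneta, mtk, mlx4) drop the skb->xmit_more field in favour of the netdev_xmit_more() helper, which reads the batching hint from per-CPU softnet state rather than from the skb. The helper is roughly the following (a sketch; include/linux/netdevice.h has the authoritative definition):

	static inline bool netdev_xmit_more(void)
	{
		/* the xmit_more hint now lives in per-CPU softnet data */
		return __this_cpu_read(softnet_data.xmit.more);
	}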
index 2781ab91ca82f2b0a9249afbb293042e9705b3a5..79420bcc7414818d7081dc28c13e9c6d75567213 100644 (file)
@@ -1527,6 +1527,8 @@ struct i40e_generic_seg_header {
 struct i40e_metadata_segment {
        struct i40e_generic_seg_header header;
        struct i40e_ddp_version version;
+#define I40E_DDP_TRACKID_RDONLY                0
+#define I40E_DDP_TRACKID_INVALID       0xFFFFFFFF
        u32 track_id;
        char name[I40E_DDP_NAME_SIZE];
 };
@@ -1555,15 +1557,36 @@ struct i40e_profile_section_header {
        struct {
 #define SECTION_TYPE_INFO      0x00000010
 #define SECTION_TYPE_MMIO      0x00000800
+#define SECTION_TYPE_RB_MMIO   0x00001800
 #define SECTION_TYPE_AQ                0x00000801
+#define SECTION_TYPE_RB_AQ     0x00001801
 #define SECTION_TYPE_NOTE      0x80000000
 #define SECTION_TYPE_NAME      0x80000001
+#define SECTION_TYPE_PROTO     0x80000002
+#define SECTION_TYPE_PCTYPE    0x80000003
+#define SECTION_TYPE_PTYPE     0x80000004
                u32 type;
                u32 offset;
                u32 size;
        } section;
 };
 
+struct i40e_profile_tlv_section_record {
+       u8 rtype;
+       u8 type;
+       u16 len;
+       u8 data[12];
+};
+
+/* Generic AQ section in profile */
+struct i40e_profile_aq_section {
+       u16 opcode;
+       u16 flags;
+       u8  param[16];
+       u16 datalen;
+       u8  data[1];
+};
+
 struct i40e_profile_info {
        u32 track_id;
        struct i40e_ddp_version version;
index 831d52bc3c9ae5b4073d4909f591a7802f63c1c6..71cd159e79020cec738dca2d9278c25ac139484a 100644 (file)
@@ -2454,8 +2454,10 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
                                      (u8 *)&stats, sizeof(stats));
 }
 
-/* If the VF is not trusted restrict the number of MAC/VLAN it can program */
-#define I40E_VC_MAX_MAC_ADDR_PER_VF 12
+/* If the VF is not trusted, restrict the number of MAC/VLAN filters it can
+ * program. MAC filters: 16 for multicast, 1 for MAC, 1 for broadcast.
+ */
+#define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1)
 #define I40E_VC_MAX_VLAN_PER_VF 8
 
 /**
index b5c182e688e351eed227f867a8eaebaa6349bb07..1b17486543ac7e078a8723512943bab1c50d6d46 100644 (file)
@@ -102,6 +102,8 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
        if (err)
                return err;
 
+       set_bit(qid, vsi->af_xdp_zc_qps);
+
        if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);
 
        if (if_running) {
@@ -148,6 +150,7 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
                        return err;
        }
 
+       clear_bit(qid, vsi->af_xdp_zc_qps);
        i40e_xsk_umem_dma_unmap(vsi, umem);
 
        if (if_running) {
index af4f94a6541e9d3d4498de8fdc94e48fd93349b3..e5ae4a1c0cff5efdc5d869f3dbd61276d3c65c7c 100644 (file)
@@ -14,7 +14,7 @@
 
 #define I40E_FW_API_VERSION_MAJOR      0x0001
 #define I40E_FW_API_VERSION_MINOR_X722 0x0005
-#define I40E_FW_API_VERSION_MINOR_X710 0x0007
+#define I40E_FW_API_VERSION_MINOR_X710 0x0008
 
 #define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \
                                        I40E_FW_API_VERSION_MINOR_X710 : \
index 9b4d7cec2e18af2c5c092096dfc7cfa513829636..b64187753ad67ce6a6fdf3efcd020d16fdea3872 100644 (file)
@@ -2358,7 +2358,7 @@ static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb,
        first->next_to_watch = tx_desc;
 
        /* notify HW of packet */
-       if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+       if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
                writel(i, tx_ring->tail);
 
                /* we need this if more than one processor can write to our tail
index f2462799154a0e65acfb2d34794591c50944f567..a6f7b7feaf3c7f0dc479abb3bac96b958318a304 100644 (file)
@@ -1646,7 +1646,7 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
        ice_maybe_stop_tx(tx_ring, DESC_NEEDED);
 
        /* notify HW of packet */
-       if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+       if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
                writel(i, tx_ring->tail);
 
                /* we need this if more than one processor can write to our tail
index 01fcfc6f341519c1d8a67b5c8695ec9c04842b0c..d2e2c50ce257941491e4d0a96603808310357999 100644 (file)
 /* enable link status from external LINK_0 and LINK_1 pins */
 #define E1000_CTRL_SWDPIN0  0x00040000  /* SWDPIN 0 value */
 #define E1000_CTRL_SWDPIN1  0x00080000  /* SWDPIN 1 value */
+#define E1000_CTRL_ADVD3WUC 0x00100000  /* D3 WUC */
+#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */
 #define E1000_CTRL_SDP0_DIR 0x00400000  /* SDP0 Data direction */
 #define E1000_CTRL_SDP1_DIR 0x00800000  /* SDP1 Data direction */
 #define E1000_CTRL_RST      0x04000000  /* Global reset */
index bea7175d171b9f281caaa1e166b1364855d09f97..acbb5b4f333db909416750e1391f112bcc635bcb 100644 (file)
@@ -6029,7 +6029,7 @@ static int igb_tx_map(struct igb_ring *tx_ring,
        /* Make sure there is space in the ring for the next send. */
        igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
 
-       if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+       if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
                writel(i, tx_ring->tail);
 
                /* we need this if more than one processor can write to our tail
@@ -8743,9 +8743,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
        struct e1000_hw *hw = &adapter->hw;
        u32 ctrl, rctl, status;
        u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
-#ifdef CONFIG_PM
-       int retval = 0;
-#endif
+       bool wake;
 
        rtnl_lock();
        netif_device_detach(netdev);
@@ -8758,14 +8756,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
        igb_clear_interrupt_scheme(adapter);
        rtnl_unlock();
 
-#ifdef CONFIG_PM
-       if (!runtime) {
-               retval = pci_save_state(pdev);
-               if (retval)
-                       return retval;
-       }
-#endif
-
        status = rd32(E1000_STATUS);
        if (status & E1000_STATUS_LU)
                wufc &= ~E1000_WUFC_LNKC;
@@ -8782,10 +8772,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
                }
 
                ctrl = rd32(E1000_CTRL);
-               /* advertise wake from D3Cold */
-               #define E1000_CTRL_ADVD3WUC 0x00100000
-               /* phy power management enable */
-               #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
                ctrl |= E1000_CTRL_ADVD3WUC;
                wr32(E1000_CTRL, ctrl);
 
@@ -8799,12 +8785,15 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
                wr32(E1000_WUFC, 0);
        }
 
-       *enable_wake = wufc || adapter->en_mng_pt;
-       if (!*enable_wake)
+       wake = wufc || adapter->en_mng_pt;
+       if (!wake)
                igb_power_down_link(adapter);
        else
                igb_power_up_link(adapter);
 
+       if (enable_wake)
+               *enable_wake = wake;
+
        /* Release control of h/w to f/w.  If f/w is AMT enabled, this
         * would have already happened in close and is redundant.
         */
@@ -8847,22 +8836,7 @@ static void igb_deliver_wake_packet(struct net_device *netdev)
 
 static int __maybe_unused igb_suspend(struct device *dev)
 {
-       int retval;
-       bool wake;
-       struct pci_dev *pdev = to_pci_dev(dev);
-
-       retval = __igb_shutdown(pdev, &wake, 0);
-       if (retval)
-               return retval;
-
-       if (wake) {
-               pci_prepare_to_sleep(pdev);
-       } else {
-               pci_wake_from_d3(pdev, false);
-               pci_set_power_state(pdev, PCI_D3hot);
-       }
-
-       return 0;
+       return __igb_shutdown(to_pci_dev(dev), NULL, 0);
 }
 
 static int __maybe_unused igb_resume(struct device *dev)
@@ -8933,22 +8907,7 @@ static int __maybe_unused igb_runtime_idle(struct device *dev)
 
 static int __maybe_unused igb_runtime_suspend(struct device *dev)
 {
-       struct pci_dev *pdev = to_pci_dev(dev);
-       int retval;
-       bool wake;
-
-       retval = __igb_shutdown(pdev, &wake, 1);
-       if (retval)
-               return retval;
-
-       if (wake) {
-               pci_prepare_to_sleep(pdev);
-       } else {
-               pci_wake_from_d3(pdev, false);
-               pci_set_power_state(pdev, PCI_D3hot);
-       }
-
-       return 0;
+       return __igb_shutdown(to_pci_dev(dev), NULL, 1);
 }
 
 static int __maybe_unused igb_runtime_resume(struct device *dev)
index a883b3f357e7a26bafa2627e155b4f2bb04020dc..f79728381e8a8255e4be5d4b77d4ad5c8b3242cf 100644 (file)
@@ -939,7 +939,7 @@ static int igc_tx_map(struct igc_ring *tx_ring,
        /* Make sure there is space in the ring for the next send. */
        igc_maybe_stop_tx(tx_ring, DESC_NEEDED);
 
-       if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+       if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
                writel(i, tx_ring->tail);
 
                /* we need this if more than one processor can write to our tail
index 16c72898416477ebac2d083adc80005262a1db62..60cec3540dd783e6e079451a719a9d71fb14898e 100644 (file)
@@ -8297,7 +8297,7 @@ static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
 
        ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
 
-       if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+       if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
                writel(i, tx_ring->tail);
 
                /* we need this if more than one processor can write to our tail
index cc4907f9ff02c3faecba89c524674f33581d2346..2fb97967961c43893b2d06fef0df9bc476dda426 100644 (file)
@@ -905,13 +905,12 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
        struct pci_dev *pdev = adapter->pdev;
        struct device *dev = &adapter->netdev->dev;
        struct mii_bus *bus;
+       int err = -ENODEV;
 
-       adapter->mii_bus = devm_mdiobus_alloc(dev);
-       if (!adapter->mii_bus)
+       bus = devm_mdiobus_alloc(dev);
+       if (!bus)
                return -ENOMEM;
 
-       bus = adapter->mii_bus;
-
        switch (hw->device_id) {
        /* C3000 SoCs */
        case IXGBE_DEV_ID_X550EM_A_KR:
@@ -949,12 +948,15 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
         */
        hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22;
 
-       return mdiobus_register(bus);
+       err = mdiobus_register(bus);
+       if (!err) {
+               adapter->mii_bus = bus;
+               return 0;
+       }
 
 ixgbe_no_mii_bus:
        devm_mdiobus_free(dev, bus);
-       adapter->mii_bus = NULL;
-       return -ENODEV;
+       return err;
 }
 
 /**
index a944be3c57b1416bae5929c28facae10abdea507..bb68737dce56319bc8f20b42bec37aca9b9faecd 100644 (file)
@@ -2467,7 +2467,7 @@ static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
                if (txq->count >= txq->tx_stop_threshold)
                        netif_tx_stop_queue(nq);
 
-               if (!skb->xmit_more || netif_xmit_stopped(nq) ||
+               if (!netdev_xmit_more() || netif_xmit_stopped(nq) ||
                    txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK)
                        mvneta_txq_pend_desc_add(pp, txq, frags);
                else
index 549d36497b8c10767e29333f90c80bababe783da..53abe925ecb11cb14de40c4f3f23ab67b49b396d 100644 (file)
@@ -767,7 +767,8 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
         */
        wmb();
 
-       if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
+       if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
+           !netdev_xmit_more())
                mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
 
        return 0;
index fba54fb06e18cde73d80891df15b88687753daee..36a92b19e613d8dba0a5f57461e7de3d09635310 100644 (file)
@@ -1042,7 +1042,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 
        send_doorbell = __netdev_tx_sent_queue(ring->tx_queue,
                                               tx_info->nr_bytes,
-                                              skb->xmit_more);
+                                              netdev_xmit_more());
 
        real_size = (real_size / 16) & 0x3f;
 
index be48c6440251fb7426ab77d90346e440b9b833c4..0a2ffe794a547170bbce5935dd596d0e2ca3b034 100644 (file)
@@ -1347,7 +1347,7 @@ static void set_wqname(struct mlx5_core_dev *dev)
        struct mlx5_cmd *cmd = &dev->cmd;
 
        snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
-                dev_name(&dev->pdev->dev));
+                dev->priv.name);
 }
 
 static void clean_debug_files(struct mlx5_core_dev *dev)
@@ -1902,9 +1902,9 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
        memset(cmd, 0, sizeof(*cmd));
        cmd_if_rev = cmdif_rev(dev);
        if (cmd_if_rev != CMD_IF_REV) {
-               dev_err(&dev->pdev->dev,
-                       "Driver cmdif rev(%d) differs from firmware's(%d)\n",
-                       CMD_IF_REV, cmd_if_rev);
+               mlx5_core_err(dev,
+                             "Driver cmdif rev(%d) differs from firmware's(%d)\n",
+                             CMD_IF_REV, cmd_if_rev);
                return -EINVAL;
        }
 
@@ -1921,14 +1921,14 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
        cmd->log_sz = cmd_l >> 4 & 0xf;
        cmd->log_stride = cmd_l & 0xf;
        if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
-               dev_err(&dev->pdev->dev, "firmware reports too many outstanding commands %d\n",
-                       1 << cmd->log_sz);
+               mlx5_core_err(dev, "firmware reports too many outstanding commands %d\n",
+                             1 << cmd->log_sz);
                err = -EINVAL;
                goto err_free_page;
        }
 
        if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
-               dev_err(&dev->pdev->dev, "command queue size overflow\n");
+               mlx5_core_err(dev, "command queue size overflow\n");
                err = -EINVAL;
                goto err_free_page;
        }
@@ -1939,8 +1939,8 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
 
        cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
        if (cmd->cmdif_rev > CMD_IF_REV) {
-               dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n",
-                       CMD_IF_REV, cmd->cmdif_rev);
+               mlx5_core_err(dev, "driver does not support command interface version. driver %d, firmware %d\n",
+                             CMD_IF_REV, cmd->cmdif_rev);
                err = -EOPNOTSUPP;
                goto err_free_page;
        }
@@ -1956,7 +1956,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
        cmd_h = (u32)((u64)(cmd->dma) >> 32);
        cmd_l = (u32)(cmd->dma);
        if (cmd_l & 0xfff) {
-               dev_err(&dev->pdev->dev, "invalid command queue address\n");
+               mlx5_core_err(dev, "invalid command queue address\n");
                err = -ENOMEM;
                goto err_free_page;
        }
@@ -1976,7 +1976,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
        set_wqname(dev);
        cmd->wq = create_singlethread_workqueue(cmd->wq_name);
        if (!cmd->wq) {
-               dev_err(&dev->pdev->dev, "failed to create command workqueue\n");
+               mlx5_core_err(dev, "failed to create command workqueue\n");
                err = -ENOMEM;
                goto err_cache;
        }
index 83f90e9aff45fa16c9db2b40ec737f214b502ae7..7b5901d42994fe18da6b18c4bdc6414e742042db 100644 (file)
@@ -47,7 +47,7 @@ TRACE_EVENT(mlx5_fw,
        TP_ARGS(tracer, trace_timestamp, lost, event_id, msg),
 
        TP_STRUCT__entry(
-               __string(dev_name, dev_name(&tracer->dev->pdev->dev))
+               __string(dev_name, tracer->dev->priv.name)
                __field(u64, trace_timestamp)
                __field(bool, lost)
                __field(u8, event_id)
@@ -55,7 +55,7 @@ TRACE_EVENT(mlx5_fw,
        ),
 
        TP_fast_assign(
-               __assign_str(dev_name, dev_name(&tracer->dev->pdev->dev));
+               __assign_str(dev_name, tracer->dev->priv.name);
                __entry->trace_timestamp = trace_timestamp;
                __entry->lost = lost;
                __entry->event_id = event_id;
index 9e71cf03369cc418000b60096fad9ee88dc770cb..51e109fdeec12809ececb42b53e7da1ebb5cc69d 100644 (file)
@@ -241,7 +241,6 @@ struct mlx5e_params {
        struct net_dim_cq_moder rx_cq_moderation;
        struct net_dim_cq_moder tx_cq_moderation;
        bool lro_en;
-       u32 lro_wqe_sz;
        u8  tx_min_inline_mode;
        bool vlan_strip_disable;
        bool scatter_fcs_en;
@@ -772,7 +771,7 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
                       struct net_device *sb_dev);
 netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
 netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
-                         struct mlx5e_tx_wqe *wqe, u16 pi);
+                         struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more);
 
 void mlx5e_completion_event(struct mlx5_core_cq *mcq);
 void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
@@ -857,6 +856,7 @@ void mlx5e_close_channels(struct mlx5e_channels *chs);
  * switching channels
  */
 typedef int (*mlx5e_fp_hw_modify)(struct mlx5e_priv *priv);
+int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv);
 int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
                               struct mlx5e_channels *new_chs,
                               mlx5e_fp_hw_modify hw_modify);
@@ -975,7 +975,7 @@ void mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc,
         */
        wmb();
 
-       mlx5_write64((__be32 *)ctrl, uar_map, NULL);
+       mlx5_write64((__be32 *)ctrl, uar_map);
 }
 
 static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
@@ -1087,6 +1087,7 @@ mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *prof
 int mlx5e_attach_netdev(struct mlx5e_priv *priv);
 void mlx5e_detach_netdev(struct mlx5e_priv *priv);
 void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
+void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv);
 void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
                            struct mlx5e_rss_params *rss_params,
                            struct mlx5e_params *params,
index 122927f3a6005b2be70c694ead0a6d4a139069c6..d5e5afbdca6dcbacb776571c97eef74e033cf356 100644 (file)
@@ -96,9 +96,6 @@ int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
        if (!eproto)
                return -EINVAL;
 
-       if (ext !=  MLX5_CAP_PCAM_FEATURE(dev, ptys_extended_ethernet))
-               return -EOPNOTSUPP;
-
        err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, port);
        if (err)
                return err;
index b0ce68feb0f33eafe746ab5993e4d62362a7f06f..633b117eb13e81437c402d4ed05efd892bf68eb1 100644 (file)
@@ -122,7 +122,9 @@ static int port_set_buffer(struct mlx5e_priv *priv,
        return err;
 }
 
-/* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B]) */
+/* xoff = (301 + 2.16 * len [m]) * speed [Gbps] + 2.72 * MTU [B]
+ * minimum speed value is 40 Gbps
+ */
 static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
 {
        u32 speed;
@@ -130,10 +132,9 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
        int err;
 
        err = mlx5e_port_linkspeed(priv->mdev, &speed);
-       if (err) {
-               mlx5_core_warn(priv->mdev, "cannot get port speed\n");
-               return 0;
-       }
+       if (err)
+               speed = SPEED_40000;
+       speed = max_t(u32, speed, SPEED_40000);
 
        xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100;
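The constants fold the comment's decimal coefficients into integer math: 2.16 * len becomes 216 * len / 100, 2.72 * MTU becomes 272 * mtu / 100, and speed (reported in Mbps) is divided by 1000 to yield Gbps. A worked check with illustrative numbers (7 m cable, 40 Gbps link, 1500-byte MTU; not driver code):

	u32 speed = 40000;	/* Mbps, i.e. 40 Gbps */
	u32 len   = 7;		/* cable length in metres */
	u32 mtu   = 1500;
	u32 xoff  = (301 + 216 * len / 100) * speed / 1000 + 272 * mtu / 100;
				/* = (301 + 15) * 40 + 4080 = 16720 bytes */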
 
@@ -142,7 +143,7 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
 }
 
 static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
-                                u32 xoff, unsigned int mtu)
+                                u32 xoff, unsigned int max_mtu)
 {
        int i;
 
@@ -154,11 +155,12 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
                }
 
                if (port_buffer->buffer[i].size <
-                   (xoff + mtu + (1 << MLX5E_BUFFER_CELL_SHIFT)))
+                   (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT)))
                        return -ENOMEM;
 
                port_buffer->buffer[i].xoff = port_buffer->buffer[i].size - xoff;
-               port_buffer->buffer[i].xon  = port_buffer->buffer[i].xoff - mtu;
+               port_buffer->buffer[i].xon  =
+                       port_buffer->buffer[i].xoff - max_mtu;
        }
 
        return 0;
@@ -166,7 +168,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
 
 /**
  *     update_buffer_lossy     - Update buffer configuration based on pfc
- *     @mtu: device's MTU
+ *     @max_mtu: netdev's max_mtu
  *     @pfc_en: <input> current pfc configuration
  *     @buffer: <input> current prio to buffer mapping
  *     @xoff:   <input> xoff value
@@ -183,7 +185,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
  *     @return: 0 if no error,
  *     sets change to true if buffer configuration was modified.
  */
-static int update_buffer_lossy(unsigned int mtu,
+static int update_buffer_lossy(unsigned int max_mtu,
                               u8 pfc_en, u8 *buffer, u32 xoff,
                               struct mlx5e_port_buffer *port_buffer,
                               bool *change)
@@ -220,7 +222,7 @@ static int update_buffer_lossy(unsigned int mtu,
        }
 
        if (changed) {
-               err = update_xoff_threshold(port_buffer, xoff, mtu);
+               err = update_xoff_threshold(port_buffer, xoff, max_mtu);
                if (err)
                        return err;
 
@@ -230,6 +232,7 @@ static int update_buffer_lossy(unsigned int mtu,
        return 0;
 }
 
+#define MINIMUM_MAX_MTU 9216
 int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
                                    u32 change, unsigned int mtu,
                                    struct ieee_pfc *pfc,
@@ -241,12 +244,14 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
        bool update_prio2buffer = false;
        u8 buffer[MLX5E_MAX_PRIORITY];
        bool update_buffer = false;
+       unsigned int max_mtu;
        u32 total_used = 0;
        u8 curr_pfc_en;
        int err;
        int i;
 
        mlx5e_dbg(HW, priv, "%s: change=%x\n", __func__, change);
+       max_mtu = max_t(unsigned int, priv->netdev->max_mtu, MINIMUM_MAX_MTU);
 
        err = mlx5e_port_query_buffer(priv, &port_buffer);
        if (err)
@@ -254,7 +259,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
 
        if (change & MLX5E_PORT_BUFFER_CABLE_LEN) {
                update_buffer = true;
-               err = update_xoff_threshold(&port_buffer, xoff, mtu);
+               err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
                if (err)
                        return err;
        }
@@ -264,7 +269,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
                if (err)
                        return err;
 
-               err = update_buffer_lossy(mtu, pfc->pfc_en, buffer, xoff,
+               err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff,
                                          &port_buffer, &update_buffer);
                if (err)
                        return err;
@@ -276,8 +281,8 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
                if (err)
                        return err;
 
-               err = update_buffer_lossy(mtu, curr_pfc_en, prio2buffer, xoff,
-                                         &port_buffer, &update_buffer);
+               err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer,
+                                         xoff, &port_buffer, &update_buffer);
                if (err)
                        return err;
        }
@@ -301,7 +306,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
                        return -EINVAL;
 
                update_buffer = true;
-               err = update_xoff_threshold(&port_buffer, xoff, mtu);
+               err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
                if (err)
                        return err;
        }
@@ -309,7 +314,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
        /* Need to update buffer configuration if xoff value is changed */
        if (!update_buffer && xoff != priv->dcbx.xoff) {
                update_buffer = true;
-               err = update_xoff_threshold(&port_buffer, xoff, mtu);
+               err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
                if (err)
                        return err;
        }
index 9d38e62cdf248a2f624b12227133f8132f7591bd..476dd97f7f2f25a4c0697a6ac2b34c0b5985034e 100644 (file)
@@ -186,12 +186,17 @@ static int mlx5e_tx_reporter_recover_from_ctx(struct mlx5e_tx_err_ctx *err_ctx)
 
 static int mlx5e_tx_reporter_recover_all(struct mlx5e_priv *priv)
 {
-       int err;
+       int err = 0;
 
        rtnl_lock();
        mutex_lock(&priv->state_lock);
-       mlx5e_close_locked(priv->netdev);
-       err = mlx5e_open_locked(priv->netdev);
+
+       if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+               goto out;
+
+       err = mlx5e_safe_reopen_channels(priv);
+
+out:
        mutex_unlock(&priv->state_lock);
        rtnl_unlock();
 
index fa2a3c444cdc604c308999f140a3125becd9c8d3..fe5d4d7f15edc80426bed373e2bf646dfd39dde7 100644 (file)
@@ -39,6 +39,10 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv,
                        return -EOPNOTSUPP;
        }
 
+       if (!(mlx5e_eswitch_rep(*out_dev) &&
+             mlx5e_is_uplink_rep(netdev_priv(*out_dev))))
+               return -EOPNOTSUPP;
+
        return 0;
 }
 
@@ -70,7 +74,7 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
        if (ret)
                return ret;
 
-       if (mlx5_lag_is_multipath(mdev) && !rt->rt_gateway)
+       if (mlx5_lag_is_multipath(mdev) && rt->rt_gw_family != AF_INET)
                return -ENETUNREACH;
 #else
        return -EOPNOTSUPP;
@@ -96,7 +100,7 @@ static const char *mlx5e_netdev_kind(struct net_device *dev)
        if (dev->rtnl_link_ops)
                return dev->rtnl_link_ops->kind;
        else
-               return "";
+               return "unknown";
 }
 
 static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
@@ -636,8 +640,10 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev,
                                                headers_c, headers_v);
        } else {
                netdev_warn(priv->netdev,
-                           "decapsulation offload is not supported for %s net device (%d)\n",
-                           mlx5e_netdev_kind(filter_dev), tunnel_type);
+                           "decapsulation offload is not supported for %s (kind: \"%s\")\n",
+                           netdev_name(filter_dev),
+                           mlx5e_netdev_kind(filter_dev));
+
                return -EOPNOTSUPP;
        }
        return err;
index be137d4a91692026acf2a2bce299667fc7228dcc..439bf5953885eb25f2aba6dd4aa1cef4cc0f4ecf 100644 (file)
@@ -181,7 +181,6 @@ static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb,
         */
        nskb->ip_summed = CHECKSUM_PARTIAL;
 
-       nskb->xmit_more = 1;
        nskb->queue_mapping = skb->queue_mapping;
 }
 
@@ -248,7 +247,7 @@ mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
        sq->stats->tls_resync_bytes += nskb->len;
        mlx5e_tls_complete_sync_skb(skb, nskb, tcp_seq, headln,
                                    cpu_to_be64(info.rcd_sn));
-       mlx5e_sq_xmit(sq, nskb, *wqe, *pi);
+       mlx5e_sq_xmit(sq, nskb, *wqe, *pi, true);
        mlx5e_sq_fetch_wqe(sq, wqe, pi);
        return skb;
 
index 3078491cc0d0678a6f1867373c752ed95496a59e..1539cf3de5dc97a180d7bdcb9fe2c5ec79db93c4 100644 (file)
@@ -45,7 +45,9 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev,
        if (err)
                return err;
 
+       mutex_lock(&mdev->mlx5e_res.td.list_lock);
        list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list);
+       mutex_unlock(&mdev->mlx5e_res.td.list_lock);
 
        return 0;
 }
@@ -53,8 +55,10 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev,
 void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
                       struct mlx5e_tir *tir)
 {
+       mutex_lock(&mdev->mlx5e_res.td.list_lock);
        mlx5_core_destroy_tir(mdev, tir->tirn);
        list_del(&tir->list);
+       mutex_unlock(&mdev->mlx5e_res.td.list_lock);
 }
 
 static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
@@ -114,6 +118,7 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
        }
 
        INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list);
+       mutex_init(&mdev->mlx5e_res.td.list_lock);
 
        return 0;
 
@@ -141,15 +146,17 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
 {
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5e_tir *tir;
-       int err  = -ENOMEM;
+       int err  = 0;
        u32 tirn = 0;
        int inlen;
        void *in;
 
        inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
        in = kvzalloc(inlen, GFP_KERNEL);
-       if (!in)
+       if (!in) {
+               err = -ENOMEM;
                goto out;
+       }
 
        if (enable_uc_lb)
                MLX5_SET(modify_tir_in, in, ctx.self_lb_block,
@@ -157,6 +164,7 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
 
        MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
 
+       mutex_lock(&mdev->mlx5e_res.td.list_lock);
        list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
                tirn = tir->tirn;
                err = mlx5_core_modify_tir(mdev, tirn, in, inlen);
@@ -168,6 +176,7 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
        kvfree(in);
        if (err)
                netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err);
+       mutex_unlock(&mdev->mlx5e_res.td.list_lock);
 
        return err;
 }
index a0987cc5fe4a12af0bf0155ad8f290153898518c..76a3d01a489e00832ee5ff45e2442dbef60b6d6d 100644 (file)
@@ -603,16 +603,18 @@ static void ptys2ethtool_supported_link(struct mlx5_core_dev *mdev,
                          __ETHTOOL_LINK_MODE_MASK_NBITS);
 }
 
-static void ptys2ethtool_adver_link(struct mlx5_core_dev *mdev,
-                                   unsigned long *advertising_modes,
-                                   u32 eth_proto_cap)
+static void ptys2ethtool_adver_link(unsigned long *advertising_modes,
+                                   u32 eth_proto_cap, bool ext)
 {
        unsigned long proto_cap = eth_proto_cap;
        struct ptys2ethtool_config *table;
        u32 max_size;
        int proto;
 
-       mlx5e_ethtool_get_speed_arr(mdev, &table, &max_size);
+       table = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table;
+       max_size = ext ? ARRAY_SIZE(ptys2ext_ethtool_table) :
+                        ARRAY_SIZE(ptys2legacy_ethtool_table);
+
        for_each_set_bit(proto, &proto_cap, max_size)
                bitmap_or(advertising_modes, advertising_modes,
                          table[proto].advertised,
@@ -794,12 +796,12 @@ static void get_supported(struct mlx5_core_dev *mdev, u32 eth_proto_cap,
        ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause);
 }
 
-static void get_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_cap,
-                           u8 tx_pause, u8 rx_pause,
-                           struct ethtool_link_ksettings *link_ksettings)
+static void get_advertising(u32 eth_proto_cap, u8 tx_pause, u8 rx_pause,
+                           struct ethtool_link_ksettings *link_ksettings,
+                           bool ext)
 {
        unsigned long *advertising = link_ksettings->link_modes.advertising;
-       ptys2ethtool_adver_link(mdev, advertising, eth_proto_cap);
+       ptys2ethtool_adver_link(advertising, eth_proto_cap, ext);
 
        if (rx_pause)
                ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause);
@@ -854,8 +856,9 @@ static void get_lp_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_lp,
                               struct ethtool_link_ksettings *link_ksettings)
 {
        unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising;
+       bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
 
-       ptys2ethtool_adver_link(mdev, lp_advertising, eth_proto_lp);
+       ptys2ethtool_adver_link(lp_advertising, eth_proto_lp, ext);
 }
 
 int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
@@ -872,6 +875,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
        u8 an_disable_admin;
        u8 an_status;
        u8 connector_type;
+       bool admin_ext;
        bool ext;
        int err;
 
@@ -886,6 +890,19 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
                                              eth_proto_capability);
        eth_proto_admin  = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
                                              eth_proto_admin);
+       /* Fields: eth_proto_admin and ext_eth_proto_admin are
+        * mutually exclusive. Hence try reading legacy advertising
+        * when extended advertising is zero.
+        * admin_ext indicates how eth_proto_admin should be
+        * interpreted.
+        */
+       admin_ext = ext;
+       if (ext && !eth_proto_admin) {
+               eth_proto_admin  = MLX5_GET_ETH_PROTO(ptys_reg, out, false,
+                                                     eth_proto_admin);
+               admin_ext = false;
+       }
+
        eth_proto_oper   = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
                                              eth_proto_oper);
        eth_proto_lp        = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise);
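The fallback just added reflects the PTYS layout described in the comment: the legacy and extended admin fields are mutually exclusive, so on ext-capable hardware an empty extended field means the configuration lives in the legacy one, and `admin_ext` records which table the value belongs to. A runnable sketch of the decision, with an invented two-word register layout standing in for MLX5_GET_ETH_PROTO():

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* read_field() is a stand-in for MLX5_GET_ETH_PROTO(ptys_reg, out, ext, ...) */
static uint32_t read_field(const uint32_t out[2], bool ext)
{
	return ext ? out[1] : out[0];
}

static uint32_t read_admin_proto(const uint32_t out[2], bool ext,
				 bool *admin_ext)
{
	uint32_t admin = read_field(out, ext);

	*admin_ext = ext;
	if (ext && !admin) {			/* extended field empty: */
		admin = read_field(out, false);	/* fall back to legacy */
		*admin_ext = false;		/* ...and remember that */
	}
	return admin;
}

int main(void)
{
	uint32_t out[2] = { 0x4, 0x0 };		/* legacy set, ext empty */
	bool admin_ext;
	uint32_t admin = read_admin_proto(out, true, &admin_ext);

	printf("admin=%#x ext=%d\n", (unsigned)admin, (int)admin_ext);
	return 0;
}
```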
@@ -899,7 +916,8 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
        ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
 
        get_supported(mdev, eth_proto_cap, link_ksettings);
-       get_advertising(mdev, eth_proto_admin, tx_pause, rx_pause, link_ksettings);
+       get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings,
+                       admin_ext);
        get_speed_duplex(priv->netdev, eth_proto_oper, link_ksettings);
 
        eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
@@ -997,19 +1015,17 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
 
 #define MLX5E_PTYS_EXT ((1ULL << ETHTOOL_LINK_MODE_50000baseKR_Full_BIT) - 1)
 
-       ext_requested = (link_ksettings->link_modes.advertising[0] >
-                       MLX5E_PTYS_EXT);
+       ext_requested = !!(link_ksettings->link_modes.advertising[0] >
+                       MLX5E_PTYS_EXT ||
+                       link_ksettings->link_modes.advertising[1]);
        ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
-
-       /*when ptys_extended_ethernet is set legacy link modes are deprecated */
-       if (ext_requested != ext_supported)
-               return -EPROTONOSUPPORT;
+       ext_requested &= ext_supported;
 
        speed = link_ksettings->base.speed;
        ethtool2ptys_adver_func = ext_requested ?
                                  mlx5e_ethtool2ptys_ext_adver_link :
                                  mlx5e_ethtool2ptys_adver_link;
-       err = mlx5_port_query_eth_proto(mdev, 1, ext_supported, &eproto);
+       err = mlx5_port_query_eth_proto(mdev, 1, ext_requested, &eproto);
        if (err) {
                netdev_err(priv->netdev, "%s: query port eth proto failed: %d\n",
                           __func__, err);
@@ -1037,7 +1053,7 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
        if (!an_changes && link_modes == eproto.admin)
                goto out;
 
-       mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext_supported);
+       mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext_requested);
        mlx5_toggle_port_link(mdev);
 
 out:
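Two things changed in this hunk. First, `ext_requested` now also inspects word 1 of the advertising bitmap: MLX5E_PTYS_EXT masks every legacy link-mode bit, so a set bit above it in word 0, or any bit in word 1, can only denote an extended mode. Second, instead of failing with -EPROTONOSUPPORT on a mismatch, the request is simply ANDed with device support. A self-contained sketch of the bit test (bit index 52 is illustrative only; the real threshold is ETHTOOL_LINK_MODE_50000baseKR_Full_BIT):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FIRST_EXT_BIT 52			/* assumed for illustration */
#define LEGACY_MASK ((1ULL << FIRST_EXT_BIT) - 1)

static bool wants_ext_modes(const uint64_t adv[2])
{
	/* any bit above the legacy range, or anything in word 1,
	 * is expressible only via the extended PTYS register */
	return adv[0] > LEGACY_MASK || adv[1];
}

int main(void)
{
	uint64_t legacy[2] = { 1ULL << 10, 0 };
	uint64_t ext[2]    = { 1ULL << 60, 0 };

	printf("%d %d\n", wants_ext_modes(legacy), wants_ext_modes(ext));
	return 0;
}
```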
@@ -1752,7 +1768,8 @@ static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable)
        struct mlx5e_channel *c;
        int i;
 
-       if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+       if (!test_bit(MLX5E_STATE_OPENED, &priv->state) ||
+           priv->channels.params.xdp_prog)
                return 0;
 
        for (i = 0; i < channels->num; i++) {
index e08a1eb04e221c3497091c6ba8ef8d0e5f0060d0..5c127fccad601203956f50821bd027b734d9bdb5 100644 (file)
@@ -204,7 +204,6 @@ static u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
 void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
                               struct mlx5e_params *params)
 {
-       params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
        params->log_rq_mtu_frames = is_kdump_kernel() ?
                MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
                MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
@@ -953,7 +952,11 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
        if (params->rx_dim_enabled)
                __set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
 
-       if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE))
+       /* We disable csum_complete when XDP is enabled since
+        * XDP programs might manipulate packets, which would render
+        * skb->checksum incorrect.
+        */
+       if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) || c->xdp)
                __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
 
        return 0;
@@ -2637,7 +2640,7 @@ static void mlx5e_build_tir_ctx_lro(struct mlx5e_params *params, void *tirc)
                 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
                 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
        MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
-                (params->lro_wqe_sz - ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
+                (MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ - ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
        MLX5_SET(tirc, tirc, lro_timeout_period_usecs, params->lro_timeout);
 }
 
@@ -2812,6 +2815,21 @@ int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
        return 0;
 }
 
+void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv)
+{
+       struct mlx5e_params *params = &priv->channels.params;
+       struct net_device *netdev   = priv->netdev;
+       struct mlx5_core_dev *mdev  = priv->mdev;
+       u16 max_mtu;
+
+       /* MTU range: 68 - hw-specific max */
+       netdev->min_mtu = ETH_MIN_MTU;
+
+       mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
+       netdev->max_mtu = min_t(unsigned int, MLX5E_HW2SW_MTU(params, max_mtu),
+                               ETH_MAX_MTU);
+}
+
 static void mlx5e_netdev_set_tcs(struct net_device *netdev)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -2942,6 +2960,14 @@ int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
        return 0;
 }
 
+int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv)
+{
+       struct mlx5e_channels new_channels = {};
+
+       new_channels.params = priv->channels.params;
+       return mlx5e_safe_switch_channels(priv, &new_channels, NULL);
+}
+
 void mlx5e_timestamp_init(struct mlx5e_priv *priv)
 {
        priv->tstamp.tx_type   = HWTSTAMP_TX_OFF;
@@ -4172,11 +4198,10 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
        if (!report_failed)
                goto unlock;
 
-       mlx5e_close_locked(priv->netdev);
-       err = mlx5e_open_locked(priv->netdev);
+       err = mlx5e_safe_reopen_channels(priv);
        if (err)
                netdev_err(priv->netdev,
-                          "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
+                          "mlx5e_safe_reopen_channels failed recovering from a tx_timeout, err(%d).\n",
                           err);
 
 unlock:
@@ -4564,7 +4589,7 @@ void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
 {
        enum mlx5e_traffic_types tt;
 
-       rss_params->hfunc = ETH_RSS_HASH_XOR;
+       rss_params->hfunc = ETH_RSS_HASH_TOP;
        netdev_rss_key_fill(rss_params->toeplitz_hash_key,
                            sizeof(rss_params->toeplitz_hash_key));
        mlx5e_build_default_indir_rqt(rss_params->indirection_rqt,
@@ -4913,7 +4938,6 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
 {
        struct net_device *netdev = priv->netdev;
        struct mlx5_core_dev *mdev = priv->mdev;
-       u16 max_mtu;
 
        mlx5e_init_l2_addr(priv);
 
@@ -4921,10 +4945,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
        if (!netif_running(netdev))
                mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);
 
-       /* MTU range: 68 - hw-specific max */
-       netdev->min_mtu = ETH_MIN_MTU;
-       mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
-       netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
+       mlx5e_set_netdev_mtu_boundaries(priv);
        mlx5e_set_dev_port_mtu(priv);
 
        mlx5_lag_add(mdev, netdev);
index a66b6ed80b302f2c236e303a233812ddd678401f..6bfdefa8b9f410ddb3f0b0ad7077cd79108f66cf 100644 (file)
@@ -795,7 +795,8 @@ static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb,
        struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
        struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
 
-       if (!mlx5e_tc_tun_device_to_offload(priv, netdev))
+       if (!mlx5e_tc_tun_device_to_offload(priv, netdev) &&
+           !is_vlan_dev(netdev))
                return NOTIFY_OK;
 
        switch (event) {
@@ -1623,13 +1624,7 @@ static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
 
 static void mlx5e_vf_rep_enable(struct mlx5e_priv *priv)
 {
-       struct net_device *netdev = priv->netdev;
-       struct mlx5_core_dev *mdev = priv->mdev;
-       u16 max_mtu;
-
-       netdev->min_mtu = ETH_MIN_MTU;
-       mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
-       netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
+       mlx5e_set_netdev_mtu_boundaries(priv);
 }
 
 static int uplink_rep_async_event(struct notifier_block *nb, unsigned long event, void *data)
index 3dde5c7e0739afd6d04f874290d5a332c97f68cf..c3b3002ff62f073f8c9fff88ea2fb74693474619 100644 (file)
@@ -692,7 +692,14 @@ static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth,
 {
        *proto = ((struct ethhdr *)skb->data)->h_proto;
        *proto = __vlan_get_protocol(skb, *proto, network_depth);
-       return (*proto == htons(ETH_P_IP) || *proto == htons(ETH_P_IPV6));
+
+       if (*proto == htons(ETH_P_IP))
+               return pskb_may_pull(skb, *network_depth + sizeof(struct iphdr));
+
+       if (*proto == htons(ETH_P_IPV6))
+               return pskb_may_pull(skb, *network_depth + sizeof(struct ipv6hdr));
+
+       return false;
 }
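The stricter helper no longer trusts the EtherType alone: callers such as get_ip_proto() read IP header fields straight out of skb->data, so the header bytes must be guaranteed linear first. A minimal sketch of the guard, assuming kernel context:

```c
#include <linux/ip.h>
#include <linux/skbuff.h>

/* Never cast skb->data to a header struct without making sure the
 * bytes are in the linear area; pskb_may_pull() pulls them in. */
static struct iphdr *ipv4_hdr_or_null(struct sk_buff *skb, int depth)
{
	if (!pskb_may_pull(skb, depth + sizeof(struct iphdr)))
		return NULL;		/* truncated or non-linear packet */
	return (struct iphdr *)(skb->data + depth);
}
```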
 
 static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)
@@ -712,17 +719,6 @@ static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)
        rq->stats->ecn_mark += !!rc;
 }
 
-static u32 mlx5e_get_fcs(const struct sk_buff *skb)
-{
-       const void *fcs_bytes;
-       u32 _fcs_bytes;
-
-       fcs_bytes = skb_header_pointer(skb, skb->len - ETH_FCS_LEN,
-                                      ETH_FCS_LEN, &_fcs_bytes);
-
-       return __get_unaligned_cpu32(fcs_bytes);
-}
-
 static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
 {
        void *ip_p = skb->data + network_depth;
@@ -733,6 +729,68 @@ static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
 
 #define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
 
+#define MAX_PADDING 8
+
+static void
+tail_padding_csum_slow(struct sk_buff *skb, int offset, int len,
+                      struct mlx5e_rq_stats *stats)
+{
+       stats->csum_complete_tail_slow++;
+       skb->csum = csum_block_add(skb->csum,
+                                  skb_checksum(skb, offset, len, 0),
+                                  offset);
+}
+
+static void
+tail_padding_csum(struct sk_buff *skb, int offset,
+                 struct mlx5e_rq_stats *stats)
+{
+       u8 tail_padding[MAX_PADDING];
+       int len = skb->len - offset;
+       void *tail;
+
+       if (unlikely(len > MAX_PADDING)) {
+               tail_padding_csum_slow(skb, offset, len, stats);
+               return;
+       }
+
+       tail = skb_header_pointer(skb, offset, len, tail_padding);
+       if (unlikely(!tail)) {
+               tail_padding_csum_slow(skb, offset, len, stats);
+               return;
+       }
+
+       stats->csum_complete_tail++;
+       skb->csum = csum_block_add(skb->csum, csum_partial(tail, len, 0), offset);
+}
+
+static void
+mlx5e_skb_padding_csum(struct sk_buff *skb, int network_depth, __be16 proto,
+                      struct mlx5e_rq_stats *stats)
+{
+       struct ipv6hdr *ip6;
+       struct iphdr   *ip4;
+       int pkt_len;
+
+       switch (proto) {
+       case htons(ETH_P_IP):
+               ip4 = (struct iphdr *)(skb->data + network_depth);
+               pkt_len = network_depth + ntohs(ip4->tot_len);
+               break;
+       case htons(ETH_P_IPV6):
+               ip6 = (struct ipv6hdr *)(skb->data + network_depth);
+               pkt_len = network_depth + sizeof(*ip6) + ntohs(ip6->payload_len);
+               break;
+       default:
+               return;
+       }
+
+       if (likely(pkt_len >= skb->len))
+               return;
+
+       tail_padding_csum(skb, pkt_len, stats);
+}
+
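Background for the three helpers above: CHECKSUM_COMPLETE must cover every byte of skb->len, but as the comment in mlx5e_handle_csum() notes, the CQE checksum does not cover the padding octets of short Ethernet frames. The driver therefore recovers the true data length from the L3 header and folds the padding checksum back in — via skb_header_pointer() for up to MAX_PADDING bytes, falling back to skb_checksum() otherwise. The length arithmetic as a runnable example (values assumed, FCS already stripped):

```c
#include <stdio.h>

int main(void)
{
	int network_depth = 14;	/* Ethernet header */
	int ip_tot_len    = 40;	/* iphdr->tot_len, example value */
	int skb_len       = 60;	/* frame padded to the Ethernet minimum */
	int pkt_len       = network_depth + ip_tot_len;

	if (pkt_len < skb_len)	/* 54 < 60: six bytes of padding */
		printf("csum tail: %d bytes at offset %d\n",
		       skb_len - pkt_len, pkt_len);
	return 0;
}
```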
 static inline void mlx5e_handle_csum(struct net_device *netdev,
                                     struct mlx5_cqe64 *cqe,
                                     struct mlx5e_rq *rq,
@@ -752,7 +810,8 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
                return;
        }
 
-       if (unlikely(test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state)))
+       /* True when explicitly set via priv flag, or XDP prog is loaded */
+       if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state))
                goto csum_unnecessary;
 
        /* CQE csum doesn't cover padding octets in short ethernet
@@ -780,18 +839,15 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
                        skb->csum = csum_partial(skb->data + ETH_HLEN,
                                                 network_depth - ETH_HLEN,
                                                 skb->csum);
-               if (unlikely(netdev->features & NETIF_F_RXFCS))
-                       skb->csum = csum_block_add(skb->csum,
-                                                  (__force __wsum)mlx5e_get_fcs(skb),
-                                                  skb->len - ETH_FCS_LEN);
+
+               mlx5e_skb_padding_csum(skb, network_depth, proto, stats);
                stats->csum_complete++;
                return;
        }
 
 csum_unnecessary:
        if (likely((cqe->hds_ip_ext & CQE_L3_OK) &&
-                  ((cqe->hds_ip_ext & CQE_L4_OK) ||
-                   (get_cqe_l4_hdr_type(cqe) == CQE_L4_HDR_TYPE_NONE)))) {
+                  (cqe->hds_ip_ext & CQE_L4_OK))) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
                if (cqe_is_tunneled(cqe)) {
                        skb->csum_level = 1;
index 1a78e05cbba8168d919bfd45af3378becd3c9b68..b75aa8b8bf04eac8cac464c0c8550013154f6267 100644 (file)
@@ -59,6 +59,8 @@ static const struct counter_desc sw_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail_slow) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
@@ -151,6 +153,8 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
                s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
                s->rx_csum_none += rq_stats->csum_none;
                s->rx_csum_complete += rq_stats->csum_complete;
+               s->rx_csum_complete_tail += rq_stats->csum_complete_tail;
+               s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow;
                s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
                s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
                s->rx_xdp_drop     += rq_stats->xdp_drop;
@@ -1190,6 +1194,8 @@ static const struct counter_desc rq_stats_desc[] = {
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
index 4640d4f986f8c6495bc5c94cf22217fb59a64b34..16c3b785f282b109e9b2bc54bd4c136095be9b3f 100644 (file)
@@ -71,6 +71,8 @@ struct mlx5e_sw_stats {
        u64 rx_csum_unnecessary;
        u64 rx_csum_none;
        u64 rx_csum_complete;
+       u64 rx_csum_complete_tail;
+       u64 rx_csum_complete_tail_slow;
        u64 rx_csum_unnecessary_inner;
        u64 rx_xdp_drop;
        u64 rx_xdp_redirect;
@@ -181,6 +183,8 @@ struct mlx5e_rq_stats {
        u64 packets;
        u64 bytes;
        u64 csum_complete;
+       u64 csum_complete_tail;
+       u64 csum_complete_tail_slow;
        u64 csum_unnecessary;
        u64 csum_unnecessary_inner;
        u64 csum_none;
index 2fd425a7b156fe44a353cbb8ae81ee1a859e43e0..a2070817a6271f72257ae5ed4f83cce47864edeb 100644 (file)
@@ -1438,6 +1438,26 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
        return 0;
 }
 
+static void *get_match_headers_criteria(u32 flags,
+                                       struct mlx5_flow_spec *spec)
+{
+       return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
+               MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+                            inner_headers) :
+               MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+                            outer_headers);
+}
+
+static void *get_match_headers_value(u32 flags,
+                                    struct mlx5_flow_spec *spec)
+{
+       return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
+               MLX5_ADDR_OF(fte_match_param, spec->match_value,
+                            inner_headers) :
+               MLX5_ADDR_OF(fte_match_param, spec->match_value,
+                            outer_headers);
+}
+
 static int __parse_cls_flower(struct mlx5e_priv *priv,
                              struct mlx5_flow_spec *spec,
                              struct tc_cls_flower_offload *f,
@@ -1503,10 +1523,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
                /* In decap flow, header pointers should point to the inner
                 * headers; outer headers were already set by parse_tunnel_attr
                 */
-               headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
-                                        inner_headers);
-               headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
-                                        inner_headers);
+               headers_c = get_match_headers_criteria(MLX5_FLOW_CONTEXT_ACTION_DECAP,
+                                                      spec);
+               headers_v = get_match_headers_value(MLX5_FLOW_CONTEXT_ACTION_DECAP,
+                                                   spec);
        }
 
        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
@@ -1521,11 +1541,23 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
                if (match.mask->n_proto)
                        *match_level = MLX5_MATCH_L2;
        }
-
-       if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+       if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) ||
+           is_vlan_dev(filter_dev)) {
+               struct flow_dissector_key_vlan filter_dev_mask;
+               struct flow_dissector_key_vlan filter_dev_key;
                struct flow_match_vlan match;
 
-               flow_rule_match_vlan(rule, &match);
+               if (is_vlan_dev(filter_dev)) {
+                       match.key = &filter_dev_key;
+                       match.key->vlan_id = vlan_dev_vlan_id(filter_dev);
+                       match.key->vlan_tpid = vlan_dev_vlan_proto(filter_dev);
+                       match.key->vlan_priority = 0;
+                       match.mask = &filter_dev_mask;
+                       memset(match.mask, 0xff, sizeof(*match.mask));
+                       match.mask->vlan_priority = 0;
+               } else {
+                       flow_rule_match_vlan(rule, &match);
+               }
                if (match.mask->vlan_id ||
                    match.mask->vlan_priority ||
                    match.mask->vlan_tpid) {
@@ -1875,39 +1907,73 @@ struct mlx5_fields {
        u8  field;
        u8  size;
        u32 offset;
+       u32 match_offset;
 };
 
-#define OFFLOAD(fw_field, size, field, off) \
-               {MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, size, offsetof(struct pedit_headers, field) + (off)}
+#define OFFLOAD(fw_field, size, field, off, match_field) \
+               {MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, size, \
+                offsetof(struct pedit_headers, field) + (off), \
+                MLX5_BYTE_OFF(fte_match_set_lyr_2_4, match_field)}
+
+static bool cmp_val_mask(void *valp, void *maskp, void *matchvalp,
+                        void *matchmaskp, int size)
+{
+       bool same = false;
+
+       switch (size) {
+       case sizeof(u8):
+               same = ((*(u8 *)valp) & (*(u8 *)maskp)) ==
+                      ((*(u8 *)matchvalp) & (*(u8 *)matchmaskp));
+               break;
+       case sizeof(u16):
+               same = ((*(u16 *)valp) & (*(u16 *)maskp)) ==
+                      ((*(u16 *)matchvalp) & (*(u16 *)matchmaskp));
+               break;
+       case sizeof(u32):
+               same = ((*(u32 *)valp) & (*(u32 *)maskp)) ==
+                      ((*(u32 *)matchvalp) & (*(u32 *)matchmaskp));
+               break;
+       }
+
+       return same;
+}
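cmp_val_mask() feeds the skip logic added further down in offload_pedit_fields(): if the flow already matches on exactly the value a pedit SET would write, the rewrite is a no-op and the HW action can be dropped. The masked-equality test, as a runnable sketch covering the 1/2/4-byte sizes the driver handles:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static bool masked_eq(const void *val, const void *mask,
		      const void *mval, const void *mmask, size_t size)
{
	uint32_t a = 0, am = 0, b = 0, bm = 0;

	memcpy(&a, val, size);		/* pedit value and mask */
	memcpy(&am, mask, size);
	memcpy(&b, mval, size);		/* match value and mask */
	memcpy(&bm, mmask, size);
	return (a & am) == (b & bm);
}

int main(void)
{
	uint8_t set_ttl = 64, set_mask = 0xff;
	uint8_t match_ttl = 64, match_mask = 0xff;

	/* rewriting ttl to 64 on a flow matching ttl==64: skippable */
	printf("%d\n", masked_eq(&set_ttl, &set_mask,
				 &match_ttl, &match_mask, 1));
	return 0;
}
```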
 
 static struct mlx5_fields fields[] = {
-       OFFLOAD(DMAC_47_16, 4, eth.h_dest[0], 0),
-       OFFLOAD(DMAC_15_0,  2, eth.h_dest[4], 0),
-       OFFLOAD(SMAC_47_16, 4, eth.h_source[0], 0),
-       OFFLOAD(SMAC_15_0,  2, eth.h_source[4], 0),
-       OFFLOAD(ETHERTYPE,  2, eth.h_proto, 0),
-       OFFLOAD(FIRST_VID,  2, vlan.h_vlan_TCI, 0),
-
-       OFFLOAD(IP_TTL, 1, ip4.ttl,   0),
-       OFFLOAD(SIPV4,  4, ip4.saddr, 0),
-       OFFLOAD(DIPV4,  4, ip4.daddr, 0),
-
-       OFFLOAD(SIPV6_127_96, 4, ip6.saddr.s6_addr32[0], 0),
-       OFFLOAD(SIPV6_95_64,  4, ip6.saddr.s6_addr32[1], 0),
-       OFFLOAD(SIPV6_63_32,  4, ip6.saddr.s6_addr32[2], 0),
-       OFFLOAD(SIPV6_31_0,   4, ip6.saddr.s6_addr32[3], 0),
-       OFFLOAD(DIPV6_127_96, 4, ip6.daddr.s6_addr32[0], 0),
-       OFFLOAD(DIPV6_95_64,  4, ip6.daddr.s6_addr32[1], 0),
-       OFFLOAD(DIPV6_63_32,  4, ip6.daddr.s6_addr32[2], 0),
-       OFFLOAD(DIPV6_31_0,   4, ip6.daddr.s6_addr32[3], 0),
-       OFFLOAD(IPV6_HOPLIMIT, 1, ip6.hop_limit, 0),
-
-       OFFLOAD(TCP_SPORT, 2, tcp.source,  0),
-       OFFLOAD(TCP_DPORT, 2, tcp.dest,    0),
-       OFFLOAD(TCP_FLAGS, 1, tcp.ack_seq, 5),
-
-       OFFLOAD(UDP_SPORT, 2, udp.source, 0),
-       OFFLOAD(UDP_DPORT, 2, udp.dest,   0),
+       OFFLOAD(DMAC_47_16, 4, eth.h_dest[0], 0, dmac_47_16),
+       OFFLOAD(DMAC_15_0,  2, eth.h_dest[4], 0, dmac_15_0),
+       OFFLOAD(SMAC_47_16, 4, eth.h_source[0], 0, smac_47_16),
+       OFFLOAD(SMAC_15_0,  2, eth.h_source[4], 0, smac_15_0),
+       OFFLOAD(ETHERTYPE,  2, eth.h_proto, 0, ethertype),
+       OFFLOAD(FIRST_VID,  2, vlan.h_vlan_TCI, 0, first_vid),
+
+       OFFLOAD(IP_TTL, 1, ip4.ttl,   0, ttl_hoplimit),
+       OFFLOAD(SIPV4,  4, ip4.saddr, 0, src_ipv4_src_ipv6.ipv4_layout.ipv4),
+       OFFLOAD(DIPV4,  4, ip4.daddr, 0, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+
+       OFFLOAD(SIPV6_127_96, 4, ip6.saddr.s6_addr32[0], 0,
+               src_ipv4_src_ipv6.ipv6_layout.ipv6[0]),
+       OFFLOAD(SIPV6_95_64,  4, ip6.saddr.s6_addr32[1], 0,
+               src_ipv4_src_ipv6.ipv6_layout.ipv6[4]),
+       OFFLOAD(SIPV6_63_32,  4, ip6.saddr.s6_addr32[2], 0,
+               src_ipv4_src_ipv6.ipv6_layout.ipv6[8]),
+       OFFLOAD(SIPV6_31_0,   4, ip6.saddr.s6_addr32[3], 0,
+               src_ipv4_src_ipv6.ipv6_layout.ipv6[12]),
+       OFFLOAD(DIPV6_127_96, 4, ip6.daddr.s6_addr32[0], 0,
+               dst_ipv4_dst_ipv6.ipv6_layout.ipv6[0]),
+       OFFLOAD(DIPV6_95_64,  4, ip6.daddr.s6_addr32[1], 0,
+               dst_ipv4_dst_ipv6.ipv6_layout.ipv6[4]),
+       OFFLOAD(DIPV6_63_32,  4, ip6.daddr.s6_addr32[2], 0,
+               dst_ipv4_dst_ipv6.ipv6_layout.ipv6[8]),
+       OFFLOAD(DIPV6_31_0,   4, ip6.daddr.s6_addr32[3], 0,
+               dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
+       OFFLOAD(IPV6_HOPLIMIT, 1, ip6.hop_limit, 0, ttl_hoplimit),
+
+       OFFLOAD(TCP_SPORT, 2, tcp.source,  0, tcp_sport),
+       OFFLOAD(TCP_DPORT, 2, tcp.dest,    0, tcp_dport),
+       OFFLOAD(TCP_FLAGS, 1, tcp.ack_seq, 5, tcp_flags),
+
+       OFFLOAD(UDP_SPORT, 2, udp.source, 0, udp_sport),
+       OFFLOAD(UDP_DPORT, 2, udp.dest,   0, udp_dport),
 };
 
 /* On input attr->max_mod_hdr_actions tells how many HW actions can be parsed at
@@ -1916,9 +1982,14 @@ static struct mlx5_fields fields[] = {
  */
 static int offload_pedit_fields(struct pedit_headers_action *hdrs,
                                struct mlx5e_tc_flow_parse_attr *parse_attr,
+                               u32 *action_flags,
                                struct netlink_ext_ack *extack)
 {
        struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
+       void *headers_c = get_match_headers_criteria(*action_flags,
+                                                    &parse_attr->spec);
+       void *headers_v = get_match_headers_value(*action_flags,
+                                                 &parse_attr->spec);
        int i, action_size, nactions, max_actions, first, last, next_z;
        void *s_masks_p, *a_masks_p, *vals_p;
        struct mlx5_fields *f;
@@ -1942,6 +2013,8 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs,
        nactions = parse_attr->num_mod_hdr_actions;
 
        for (i = 0; i < ARRAY_SIZE(fields); i++) {
+               bool skip;
+
                f = &fields[i];
                /* avoid seeing bits set from previous iterations */
                s_mask = 0;
@@ -1970,19 +2043,34 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs,
                        return -EOPNOTSUPP;
                }
 
+               skip = false;
                if (s_mask) {
+                       void *match_mask = headers_c + f->match_offset;
+                       void *match_val = headers_v + f->match_offset;
+
                        cmd  = MLX5_ACTION_TYPE_SET;
                        mask = s_mask;
                        vals_p = (void *)set_vals + f->offset;
+                       /* don't rewrite if we have a match on the same value */
+                       if (cmp_val_mask(vals_p, s_masks_p, match_val,
+                                        match_mask, f->size))
+                               skip = true;
                        /* clear to denote we consumed this field */
                        memset(s_masks_p, 0, f->size);
                } else {
+                       u32 zero = 0;
+
                        cmd  = MLX5_ACTION_TYPE_ADD;
                        mask = a_mask;
                        vals_p = (void *)add_vals + f->offset;
+                       /* add 0 is no change */
+                       if (!memcmp(vals_p, &zero, f->size))
+                               skip = true;
                        /* clear to denote we consumed this field */
                        memset(a_masks_p, 0, f->size);
                }
+               if (skip)
+                       continue;
 
                field_bsize = f->size * BITS_PER_BYTE;
 
@@ -2029,6 +2117,15 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs,
        return 0;
 }
 
+static int mlx5e_flow_namespace_max_modify_action(struct mlx5_core_dev *mdev,
+                                                 int namespace)
+{
+       if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
+               return MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, max_modify_header_actions);
+       else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
+               return MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_modify_header_actions);
+}
+
 static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
                                 struct pedit_headers_action *hdrs,
                                 int namespace,
@@ -2040,11 +2137,7 @@ static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
                hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits;
        action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
 
-       if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
-               max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, max_modify_header_actions);
-       else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
-               max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, max_modify_header_actions);
-
+       max_actions = mlx5e_flow_namespace_max_modify_action(priv->mdev, namespace);
        /* can get up to a crazy 16 HW actions in a 32-bit pedit SW key */
        max_actions = min(max_actions, nkeys * 16);
 
@@ -2077,6 +2170,12 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv,
                goto out_err;
        }
 
+       if (!mlx5e_flow_namespace_max_modify_action(priv->mdev, namespace)) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "The pedit offload action is not supported");
+               goto out_err;
+       }
+
        mask = act->mangle.mask;
        val = act->mangle.val;
        offset = act->mangle.offset;
@@ -2095,6 +2194,7 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv,
 static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
                                 struct mlx5e_tc_flow_parse_attr *parse_attr,
                                 struct pedit_headers_action *hdrs,
+                                u32 *action_flags,
                                 struct netlink_ext_ack *extack)
 {
        struct pedit_headers *cmd_masks;
@@ -2107,7 +2207,7 @@ static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
                        goto out_err;
        }
 
-       err = offload_pedit_fields(hdrs, parse_attr, extack);
+       err = offload_pedit_fields(hdrs, parse_attr, action_flags, extack);
        if (err < 0)
                goto out_dealloc_parsed_actions;
 
@@ -2161,6 +2261,52 @@ static bool csum_offload_supported(struct mlx5e_priv *priv,
        return true;
 }
 
+struct ip_ttl_word {
+       __u8    ttl;
+       __u8    protocol;
+       __sum16 check;
+};
+
+struct ipv6_hoplimit_word {
+       __be16  payload_len;
+       __u8    nexthdr;
+       __u8    hop_limit;
+};
+
+static bool is_action_keys_supported(const struct flow_action_entry *act)
+{
+       u32 mask, offset;
+       u8 htype;
+
+       htype = act->mangle.htype;
+       offset = act->mangle.offset;
+       mask = ~act->mangle.mask;
+       /* For IPv4 & IPv6 headers, check the 4-byte word containing
+        * ttl/hop_limit to determine whether any field other than
+        * ttl & hop_limit is being modified.
+        */
+       if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) {
+               struct ip_ttl_word *ttl_word =
+                       (struct ip_ttl_word *)&mask;
+
+               if (offset != offsetof(struct iphdr, ttl) ||
+                   ttl_word->protocol ||
+                   ttl_word->check) {
+                       return true;
+               }
+       } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
+               struct ipv6_hoplimit_word *hoplimit_word =
+                       (struct ipv6_hoplimit_word *)&mask;
+
+               if (offset != offsetof(struct ipv6hdr, payload_len) ||
+                   hoplimit_word->payload_len ||
+                   hoplimit_word->nexthdr) {
+                       return true;
+               }
+       }
+       return false;
+}
+
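is_action_keys_supported() inverts the pedit mask (so a set bit now means "this bit is rewritten") and overlays a small struct on the 32-bit word to inspect the neighbouring fields by name: a pure ttl/hop_limit rewrite stays offloadable, while one that also touches protocol/check (or payload_len/nexthdr) does not. A little-endian user-space sketch of the overlay trick:

```c
#include <stdint.h>
#include <stdio.h>

/* Byte layout of the 32-bit word at offsetof(struct iphdr, ttl):
 * ttl, protocol, then the 16-bit header checksum. */
struct ip_ttl_word {
	uint8_t  ttl;
	uint8_t  protocol;
	uint16_t check;
};

int main(void)
{
	uint32_t mangle_mask = 0xffffff00;	/* keep-bits from pedit */
	uint32_t rewritten = ~mangle_mask;	/* 1 = bit being rewritten */
	struct ip_ttl_word *w = (struct ip_ttl_word *)&rewritten;

	/* little-endian: low byte is ttl, so only ttl is touched here */
	printf("ttl=%#x protocol=%#x check=%#x\n",
	       w->ttl, w->protocol, w->check);
	return 0;
}
```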
 static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
                                          struct flow_action *flow_action,
                                          u32 actions,
@@ -2168,16 +2314,12 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
 {
        const struct flow_action_entry *act;
        bool modify_ip_header;
-       u8 htype, ip_proto;
        void *headers_v;
        u16 ethertype;
+       u8 ip_proto;
        int i;
 
-       if (actions & MLX5_FLOW_CONTEXT_ACTION_DECAP)
-               headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, inner_headers);
-       else
-               headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
-
+       headers_v = get_match_headers_value(actions, spec);
        ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
 
        /* for non-IP we only re-write MACs, so we're okay */
@@ -2190,9 +2332,7 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
                    act->id != FLOW_ACTION_ADD)
                        continue;
 
-               htype = act->mangle.htype;
-               if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4 ||
-                   htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
+               if (is_action_keys_supported(act)) {
                        modify_ip_header = true;
                        break;
                }
@@ -2225,7 +2365,8 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
                actions = flow->nic_attr->action;
 
        if (flow->flags & MLX5E_TC_FLOW_EGRESS &&
-           !(actions & MLX5_FLOW_CONTEXT_ACTION_DECAP))
+           !((actions & MLX5_FLOW_CONTEXT_ACTION_DECAP) ||
+             (actions & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP)))
                return false;
 
        if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
@@ -2265,10 +2406,25 @@ static int add_vlan_rewrite_action(struct mlx5e_priv *priv, int namespace,
                .mangle.mask = ~(u32)be16_to_cpu(*(__be16 *)&mask16),
                .mangle.val = (u32)be16_to_cpu(*(__be16 *)&val16),
        };
+       u8 match_prio_mask, match_prio_val;
+       void *headers_c, *headers_v;
        int err;
 
-       if (act->vlan.prio) {
-               NL_SET_ERR_MSG_MOD(extack, "Setting VLAN prio is not supported");
+       headers_c = get_match_headers_criteria(*action, &parse_attr->spec);
+       headers_v = get_match_headers_value(*action, &parse_attr->spec);
+
+       if (!(MLX5_GET(fte_match_set_lyr_2_4, headers_c, cvlan_tag) &&
+             MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag))) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "VLAN rewrite action must have VLAN protocol match");
+               return -EOPNOTSUPP;
+       }
+
+       match_prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
+       match_prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
+       if (act->vlan.prio != (match_prio_val & match_prio_mask)) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "Changing VLAN prio is not supported");
                return -EOPNOTSUPP;
        }
 
@@ -2362,16 +2518,22 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
                        }
                        break;
                default:
-                       return -EINVAL;
+                       NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
+                       return -EOPNOTSUPP;
                }
        }
 
        if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
            hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
                err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL,
-                                           parse_attr, hdrs, extack);
+                                           parse_attr, hdrs, &action, extack);
                if (err)
                        return err;
+               /* In case all pedit actions are skipped, remove the MOD_HDR
+                * flag.
+                */
+               if (parse_attr->num_mod_hdr_actions == 0)
+                       action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
        }
 
        attr->action = action;
@@ -2381,15 +2543,22 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
        return 0;
 }
 
-static inline int cmp_encap_info(struct ip_tunnel_key *a,
-                                struct ip_tunnel_key *b)
+struct encap_key {
+       struct ip_tunnel_key *ip_tun_key;
+       int tunnel_type;
+};
+
+static inline int cmp_encap_info(struct encap_key *a,
+                                struct encap_key *b)
 {
-       return memcmp(a, b, sizeof(*a));
+       return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) ||
+              a->tunnel_type != b->tunnel_type;
 }
 
-static inline int hash_encap_info(struct ip_tunnel_key *key)
+static inline int hash_encap_info(struct encap_key *key)
 {
-       return jhash(key, sizeof(*key), 0);
+       return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key),
+                    key->tunnel_type);
 }
 
 
@@ -2420,7 +2589,7 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
        struct mlx5_esw_flow_attr *attr = flow->esw_attr;
        struct mlx5e_tc_flow_parse_attr *parse_attr;
        struct ip_tunnel_info *tun_info;
-       struct ip_tunnel_key *key;
+       struct encap_key key, e_key;
        struct mlx5e_encap_entry *e;
        unsigned short family;
        uintptr_t hash_key;
@@ -2430,13 +2599,16 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
        parse_attr = attr->parse_attr;
        tun_info = &parse_attr->tun_info[out_index];
        family = ip_tunnel_info_af(tun_info);
-       key = &tun_info->key;
+       key.ip_tun_key = &tun_info->key;
+       key.tunnel_type = mlx5e_tc_tun_get_type(mirred_dev);
 
-       hash_key = hash_encap_info(key);
+       hash_key = hash_encap_info(&key);
 
        hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
                                   encap_hlist, hash_key) {
-               if (!cmp_encap_info(&e->tun_info.key, key)) {
+               e_key.ip_tun_key = &e->tun_info.key;
+               e_key.tunnel_type = e->tunnel_type;
+               if (!cmp_encap_info(&e_key, &key)) {
                        found = true;
                        break;
                }
@@ -2539,15 +2711,60 @@ static int parse_tc_vlan_action(struct mlx5e_priv *priv,
        return 0;
 }
 
+static int add_vlan_push_action(struct mlx5e_priv *priv,
+                               struct mlx5_esw_flow_attr *attr,
+                               struct net_device **out_dev,
+                               u32 *action)
+{
+       struct net_device *vlan_dev = *out_dev;
+       struct flow_action_entry vlan_act = {
+               .id = FLOW_ACTION_VLAN_PUSH,
+               .vlan.vid = vlan_dev_vlan_id(vlan_dev),
+               .vlan.proto = vlan_dev_vlan_proto(vlan_dev),
+               .vlan.prio = 0,
+       };
+       int err;
+
+       err = parse_tc_vlan_action(priv, &vlan_act, attr, action);
+       if (err)
+               return err;
+
+       *out_dev = dev_get_by_index_rcu(dev_net(vlan_dev),
+                                       dev_get_iflink(vlan_dev));
+       if (is_vlan_dev(*out_dev))
+               err = add_vlan_push_action(priv, attr, out_dev, action);
+
+       return err;
+}
+
+static int add_vlan_pop_action(struct mlx5e_priv *priv,
+                              struct mlx5_esw_flow_attr *attr,
+                              u32 *action)
+{
+       int nest_level = vlan_get_encap_level(attr->parse_attr->filter_dev);
+       struct flow_action_entry vlan_act = {
+               .id = FLOW_ACTION_VLAN_POP,
+       };
+       int err = 0;
+
+       while (nest_level--) {
+               err = parse_tc_vlan_action(priv, &vlan_act, attr, action);
+               if (err)
+                       return err;
+       }
+
+       return err;
+}
+
 static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
                                struct flow_action *flow_action,
-                               struct mlx5e_tc_flow_parse_attr *parse_attr,
                                struct mlx5e_tc_flow *flow,
                                struct netlink_ext_ack *extack)
 {
        struct pedit_headers_action hdrs[2] = {};
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5_esw_flow_attr *attr = flow->esw_attr;
+       struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        const struct ip_tunnel_info *info = NULL;
        const struct flow_action_entry *act;
@@ -2619,6 +2836,20 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
                                    uplink_upper == out_dev)
                                        out_dev = uplink_dev;
 
+                               if (is_vlan_dev(out_dev)) {
+                                       err = add_vlan_push_action(priv, attr,
+                                                                  &out_dev,
+                                                                  &action);
+                                       if (err)
+                                               return err;
+                               }
+                               if (is_vlan_dev(parse_attr->filter_dev)) {
+                                       err = add_vlan_pop_action(priv, attr,
+                                                                 &action);
+                                       if (err)
+                                               return err;
+                               }
+
                                if (!mlx5e_eswitch_rep(out_dev))
                                        return -EOPNOTSUPP;
 
@@ -2632,7 +2863,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
                                        out_dev->ifindex;
                                parse_attr->tun_info[attr->out_count] = *info;
                                encap = false;
-                               attr->parse_attr = parse_attr;
                                attr->dests[attr->out_count].flags |=
                                        MLX5_ESW_DEST_ENCAP;
                                attr->out_count++;
@@ -2711,16 +2941,27 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
                        break;
                        }
                default:
-                       return -EINVAL;
+                       NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
+                       return -EOPNOTSUPP;
                }
        }
 
        if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
            hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
-               err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL,
-                                           parse_attr, hdrs, extack);
+               err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB,
+                                           parse_attr, hdrs, &action, extack);
                if (err)
                        return err;
+               /* In case all pedit actions are skipped, remove the MOD_HDR
+                * flag. We might have set split_count either by pedit or
+                * pop/push; if there is no pop/push either, reset it too.
+                */
+               if (parse_attr->num_mod_hdr_actions == 0) {
+                       action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+                       if (!((action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
+                             (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)))
+                               attr->split_count = 0;
+               }
        }
 
        attr->action = action;
@@ -2889,7 +3130,7 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
        if (err)
                goto err_free;
 
-       err = parse_tc_fdb_actions(priv, &rule->action, parse_attr, flow, extack);
+       err = parse_tc_fdb_actions(priv, &rule->action, flow, extack);
        if (err)
                goto err_free;
 
index 41e2a01d3713f36bb956a566980a467e2d980f04..40f3f98aa279c7c8c1dd96abf4778d0c1bd694bf 100644 (file)
@@ -297,7 +297,8 @@ static inline void mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq,
 static inline void
 mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                     u8 opcode, u16 ds_cnt, u8 num_wqebbs, u32 num_bytes, u8 num_dma,
-                    struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg)
+                    struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg,
+                    bool xmit_more)
 {
        struct mlx5_wq_cyc *wq = &sq->wq;
 
@@ -320,14 +321,14 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                sq->stats->stopped++;
        }
 
-       if (!skb->xmit_more || netif_xmit_stopped(sq->txq))
+       if (!xmit_more || netif_xmit_stopped(sq->txq))
                mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);
 }
 
 #define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))
 
 netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
-                         struct mlx5e_tx_wqe *wqe, u16 pi)
+                         struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more)
 {
        struct mlx5_wq_cyc *wq = &sq->wq;
        struct mlx5_wqe_ctrl_seg *cseg;
@@ -360,7 +361,7 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
        }
 
        stats->bytes     += num_bytes;
-       stats->xmit_more += skb->xmit_more;
+       stats->xmit_more += netdev_xmit_more();
 
        headlen = skb->len - ihs - skb->data_len;
        ds_cnt += !!headlen;
@@ -423,7 +424,7 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                goto err_drop;
 
        mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
-                            num_dma, wi, cseg);
+                            num_dma, wi, cseg, xmit_more);
 
        return NETDEV_TX_OK;
 
@@ -449,7 +450,7 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
        if (unlikely(!skb))
                return NETDEV_TX_OK;
 
-       return mlx5e_sq_xmit(sq, skb, wqe, pi);
+       return mlx5e_sq_xmit(sq, skb, wqe, pi, netdev_xmit_more());
 }
 
 static void mlx5e_dump_error_cqe(struct mlx5e_txqsq *sq,
@@ -659,7 +660,7 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
        }
 
        stats->bytes     += num_bytes;
-       stats->xmit_more += skb->xmit_more;
+       stats->xmit_more += netdev_xmit_more();
 
        headlen = skb->len - ihs - skb->data_len;
        ds_cnt += !!headlen;
@@ -704,7 +705,7 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                goto err_drop;
 
        mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
-                            num_dma, wi, cseg);
+                            num_dma, wi, cseg, false);
 
        return NETDEV_TX_OK;
 
index 46a747f7c1628f88f2a65917d34f1940139d8d72..e9837aeb7088d8f611179e2b74c417d73ffa7d7a 100644 (file)
@@ -707,7 +707,7 @@ void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm)
 
        __raw_writel((__force u32)cpu_to_be32(val), addr);
        /* We still want ordering, just not swabbing, so add a barrier */
-       mb();
+       wmb();
 }
 EXPORT_SYMBOL(mlx5_eq_update_ci);
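The mb()→wmb() change keeps what is actually required — the consumer-index store must be visible before the MMIO write that re-arms the EQ — while dropping the read ordering that a full mb() also imposes. A sketch of the underlying doorbell pattern, assuming kernel context and a hypothetical helper:

```c
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/types.h>

/* Publish the new consumer index in memory, then kick the device via
 * MMIO. Only store->store ordering matters, so wmb() is enough. */
static void ring_doorbell(u32 *db_rec, u32 ci, void __iomem *db, u32 val)
{
	*db_rec = ci;			/* memory write the HW will read */
	wmb();				/* order it before the MMIO kick */
	__raw_writel((__force u32)cpu_to_be32(val), db);
}
```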
 
index ecd2c747f7260306fd972478ecce71610918e3b3..8a67fd197b7923f67af1872eae280e0f5e3eb663 100644 (file)
@@ -105,8 +105,7 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
                 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
        MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
        MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
-       if (vport)
-               MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
+       MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
        nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
                                     in, nic_vport_context);
 
@@ -134,8 +133,7 @@ static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
        MLX5_SET(modify_esw_vport_context_in, in, opcode,
                 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
        MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
-       if (vport)
-               MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
+       MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
        return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
 }
 
@@ -431,6 +429,8 @@ static int esw_create_legacy_table(struct mlx5_eswitch *esw)
 {
        int err;
 
+       memset(&esw->fdb_table.legacy, 0, sizeof(struct legacy_fdb));
+
        err = esw_create_legacy_vepa_table(esw);
        if (err)
                return err;
@@ -2157,6 +2157,7 @@ static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
 
        /* Star rule to forward all traffic to uplink vport */
        memset(spec, 0, sizeof(*spec));
+       memset(&dest, 0, sizeof(dest));
        dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
        dest.vport.num = MLX5_VPORT_UPLINK;
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
index 6c72f33f6d09ea6c589ba51b16e1cae3707ea482..1a3cab34b85035bfe427ba4f1e50d45f12385ea2 100644 (file)
@@ -1609,6 +1609,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw, int nvports)
 {
        int err;
 
+       memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
        mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
 
        err = esw_create_offloads_fdb_tables(esw, nvports);
@@ -1697,8 +1698,6 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports,
 {
        int err;
 
-       mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
-
        err = esw_offloads_steering_init(esw, total_nvports);
        if (err)
                return err;
index 5d5864e8df3c1ce4daefc99c2ffdcc4447044df4..a81e8d2168d8432d290767c702c136233a726594 100644 (file)
@@ -21,6 +21,7 @@ struct mlx5_event_nb {
 static int any_notifier(struct notifier_block *, unsigned long, void *);
 static int temp_warn(struct notifier_block *, unsigned long, void *);
 static int port_module(struct notifier_block *, unsigned long, void *);
+static int pcie_core(struct notifier_block *, unsigned long, void *);
 
 /* handler which forwards the event to events->nh, driver notifiers */
 static int forward_event(struct notifier_block *, unsigned long, void *);
@@ -30,6 +31,7 @@ static struct mlx5_nb events_nbs_ref[] = {
        {.nb.notifier_call = any_notifier,  .event_type = MLX5_EVENT_TYPE_NOTIFY_ANY },
        {.nb.notifier_call = temp_warn,     .event_type = MLX5_EVENT_TYPE_TEMP_WARN_EVENT },
        {.nb.notifier_call = port_module,   .event_type = MLX5_EVENT_TYPE_PORT_MODULE_EVENT },
+       {.nb.notifier_call = pcie_core,     .event_type = MLX5_EVENT_TYPE_GENERAL_EVENT },
 
        /* Events to be forwarded (as is) to mlx5 core interfaces (mlx5e/mlx5_ib) */
        {.nb.notifier_call = forward_event,   .event_type = MLX5_EVENT_TYPE_PORT_CHANGE },
@@ -51,11 +53,14 @@ static struct mlx5_nb events_nbs_ref[] = {
 
 struct mlx5_events {
        struct mlx5_core_dev *dev;
+       struct workqueue_struct *wq;
        struct mlx5_event_nb  notifiers[ARRAY_SIZE(events_nbs_ref)];
        /* driver notifier chain */
        struct atomic_notifier_head nh;
        /* port module events stats */
        struct mlx5_pme_stats pme_stats;
+       /* pcie_core */
+       struct work_struct pcie_core_work;
 };
 
 static const char *eqe_type_str(u8 type)
@@ -249,6 +254,69 @@ static int port_module(struct notifier_block *nb, unsigned long type, void *data
        return NOTIFY_OK;
 }
 
+enum {
+       MLX5_PCI_POWER_COULD_NOT_BE_READ = 0x0,
+       MLX5_PCI_POWER_SUFFICIENT_REPORTED = 0x1,
+       MLX5_PCI_POWER_INSUFFICIENT_REPORTED = 0x2,
+};
+
+static void mlx5_pcie_event(struct work_struct *work)
+{
+       u32 out[MLX5_ST_SZ_DW(mpein_reg)] = {0};
+       u32 in[MLX5_ST_SZ_DW(mpein_reg)] = {0};
+       struct mlx5_events *events;
+       struct mlx5_core_dev *dev;
+       u8 power_status;
+       u16 pci_power;
+
+       events = container_of(work, struct mlx5_events, pcie_core_work);
+       dev  = events->dev;
+
+       if (!MLX5_CAP_MCAM_FEATURE(dev, pci_status_and_power))
+               return;
+
+       mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
+                            MLX5_REG_MPEIN, 0, 0);
+       power_status = MLX5_GET(mpein_reg, out, pwr_status);
+       pci_power = MLX5_GET(mpein_reg, out, pci_power);
+
+       switch (power_status) {
+       case MLX5_PCI_POWER_COULD_NOT_BE_READ:
+               mlx5_core_info_rl(dev,
+                                 "PCIe slot power capability was not advertised.\n");
+               break;
+       case MLX5_PCI_POWER_INSUFFICIENT_REPORTED:
+               mlx5_core_warn_rl(dev,
+                                 "Detected insufficient power on the PCIe slot (%uW).\n",
+                                 pci_power);
+               break;
+       case MLX5_PCI_POWER_SUFFICIENT_REPORTED:
+               mlx5_core_info_rl(dev,
+                                 "PCIe slot advertised sufficient power (%uW).\n",
+                                 pci_power);
+               break;
+       }
+}
+
+static int pcie_core(struct notifier_block *nb, unsigned long type, void *data)
+{
+       struct mlx5_event_nb    *event_nb = mlx5_nb_cof(nb,
+                                                       struct mlx5_event_nb,
+                                                       nb);
+       struct mlx5_events      *events   = event_nb->ctx;
+       struct mlx5_eqe         *eqe      = data;
+
+       switch (eqe->sub_type) {
+       case MLX5_GENERAL_SUBTYPE_PCI_POWER_CHANGE_EVENT:
+               queue_work(events->wq, &events->pcie_core_work);
+               break;
+       default:
+               return NOTIFY_DONE;
+       }
+
+       return NOTIFY_OK;
+}
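pcie_core() runs from EQ notifier (atomic) context, where issuing the MPEIN access-register command is not an option, so it only queues work; mlx5_pcie_event() does the slow query later on the events workqueue. The defer-to-workqueue shape in isolation, with a hypothetical container type:

```c
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/workqueue.h>

struct my_events {			/* hypothetical container */
	struct notifier_block nb;
	struct workqueue_struct *wq;
	struct work_struct work;	/* does the slow query later */
};

static int my_notifier(struct notifier_block *nb, unsigned long type,
		       void *data)
{
	struct my_events *ev = container_of(nb, struct my_events, nb);

	queue_work(ev->wq, &ev->work);	/* cheap and safe in atomic context */
	return NOTIFY_OK;
}
```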
+
 void mlx5_get_pme_stats(struct mlx5_core_dev *dev, struct mlx5_pme_stats *stats)
 {
        *stats = dev->priv.events->pme_stats;
@@ -277,11 +345,17 @@ int mlx5_events_init(struct mlx5_core_dev *dev)
        ATOMIC_INIT_NOTIFIER_HEAD(&events->nh);
        events->dev = dev;
        dev->priv.events = events;
+       events->wq = create_singlethread_workqueue("mlx5_events");
+       if (!events->wq)
+               return -ENOMEM;
+       INIT_WORK(&events->pcie_core_work, mlx5_pcie_event);
+
        return 0;
 }
 
 void mlx5_events_cleanup(struct mlx5_core_dev *dev)
 {
+       destroy_workqueue(dev->priv.events->wq);
        kvfree(dev->priv.events);
 }
 
@@ -304,6 +378,7 @@ void mlx5_events_stop(struct mlx5_core_dev *dev)
 
        for (i = ARRAY_SIZE(events_nbs_ref) - 1; i >= 0 ; i--)
                mlx5_eq_notifier_unregister(dev, &events->notifiers[i].nb);
+       flush_workqueue(events->wq);
 }
 
 int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb)
index 873541ef4c1b754b15209f7516bcee07378f6763..ca2296a2f9ee321cd388088369454f3f50e9301d 100644 (file)
@@ -135,7 +135,7 @@ static void mlx5_fpga_conn_notify_hw(struct mlx5_fpga_conn *conn, void *wqe)
        *conn->qp.wq.sq.db = cpu_to_be32(conn->qp.sq.pc);
        /* Make sure that doorbell record is visible before ringing */
        wmb();
-       mlx5_write64(wqe, conn->fdev->conn_res.uar->map + MLX5_BF_OFFSET, NULL);
+       mlx5_write64(wqe, conn->fdev->conn_res.uar->map + MLX5_BF_OFFSET);
 }
 
 static void mlx5_fpga_conn_post_send(struct mlx5_fpga_conn *conn,
index 7e2e871dbf833b059d790bf161feb7264f2df093..52c9dee91ea465340155a14b9a4bcffc55b03cbb 100644 (file)
@@ -37,6 +37,7 @@
 
 #include <linux/mlx5/eq.h>
 
+#include "mlx5_core.h"
 #include "lib/eq.h"
 #include "fpga/cmd.h"
 
@@ -62,26 +63,26 @@ struct mlx5_fpga_device {
 };
 
 #define mlx5_fpga_dbg(__adev, format, ...) \
-       dev_dbg(&(__adev)->mdev->pdev->dev, "FPGA: %s:%d:(pid %d): " format, \
-                __func__, __LINE__, current->pid, ##__VA_ARGS__)
+       mlx5_core_dbg((__adev)->mdev, "FPGA: %s:%d:(pid %d): " format, \
+                      __func__, __LINE__, current->pid, ##__VA_ARGS__)
 
 #define mlx5_fpga_err(__adev, format, ...) \
-       dev_err(&(__adev)->mdev->pdev->dev, "FPGA: %s:%d:(pid %d): " format, \
-               __func__, __LINE__, current->pid, ##__VA_ARGS__)
+       mlx5_core_err((__adev)->mdev, "FPGA: %s:%d:(pid %d): " format, \
+                     __func__, __LINE__, current->pid, ##__VA_ARGS__)
 
 #define mlx5_fpga_warn(__adev, format, ...) \
-       dev_warn(&(__adev)->mdev->pdev->dev, "FPGA: %s:%d:(pid %d): " format, \
-               __func__, __LINE__, current->pid, ##__VA_ARGS__)
+       mlx5_core_warn((__adev)->mdev, "FPGA: %s:%d:(pid %d): " format, \
+                      __func__, __LINE__, current->pid, ##__VA_ARGS__)
 
 #define mlx5_fpga_warn_ratelimited(__adev, format, ...) \
-       dev_warn_ratelimited(&(__adev)->mdev->pdev->dev, "FPGA: %s:%d: " \
-               format, __func__, __LINE__, ##__VA_ARGS__)
+       mlx5_core_err_rl((__adev)->mdev, "FPGA: %s:%d: " \
+                        format, __func__, __LINE__, ##__VA_ARGS__)
 
 #define mlx5_fpga_notice(__adev, format, ...) \
-       dev_notice(&(__adev)->mdev->pdev->dev, "FPGA: " format, ##__VA_ARGS__)
+       mlx5_core_info((__adev)->mdev, "FPGA: " format, ##__VA_ARGS__)
 
 #define mlx5_fpga_info(__adev, format, ...) \
-       dev_info(&(__adev)->mdev->pdev->dev, "FPGA: " format, ##__VA_ARGS__)
+       mlx5_core_info((__adev)->mdev, "FPGA: " format, ##__VA_ARGS__)
 
 int mlx5_fpga_init(struct mlx5_core_dev *mdev);
 void mlx5_fpga_cleanup(struct mlx5_core_dev *mdev);
index 5cf5f2a9d51fec724f4fac709e29e40f4110d5f7..22a2ef11151441c3abcfc07e7a6e66e292563cae 100644 (file)
@@ -148,14 +148,16 @@ static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock,
        return ret;
 }
 
-static void mlx5_fpga_tls_release_swid(struct idr *idr,
-                                      spinlock_t *idr_spinlock, u32 swid)
+static void *mlx5_fpga_tls_release_swid(struct idr *idr,
+                                       spinlock_t *idr_spinlock, u32 swid)
 {
        unsigned long flags;
+       void *ptr;
 
        spin_lock_irqsave(idr_spinlock, flags);
-       idr_remove(idr, swid);
+       ptr = idr_remove(idr, swid);
        spin_unlock_irqrestore(idr_spinlock, flags);
+       return ptr;
 }
 
 static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn,
@@ -165,20 +167,12 @@ static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn,
        kfree(buf);
 }
 
-struct mlx5_teardown_stream_context {
-       struct mlx5_fpga_tls_command_context cmd;
-       u32 swid;
-};
-
 static void
 mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn,
                                  struct mlx5_fpga_device *fdev,
                                  struct mlx5_fpga_tls_command_context *cmd,
                                  struct mlx5_fpga_dma_buf *resp)
 {
-       struct mlx5_teardown_stream_context *ctx =
-                   container_of(cmd, struct mlx5_teardown_stream_context, cmd);
-
        if (resp) {
                u32 syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome);
 
@@ -186,14 +180,6 @@ mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn,
                        mlx5_fpga_err(fdev,
                                      "Teardown stream failed with syndrome = %d",
                                      syndrome);
-               else if (MLX5_GET(tls_cmd, cmd->buf.sg[0].data, direction_sx))
-                       mlx5_fpga_tls_release_swid(&fdev->tls->tx_idr,
-                                                  &fdev->tls->tx_idr_spinlock,
-                                                  ctx->swid);
-               else
-                       mlx5_fpga_tls_release_swid(&fdev->tls->rx_idr,
-                                                  &fdev->tls->rx_idr_spinlock,
-                                                  ctx->swid);
        }
        mlx5_fpga_tls_put_command_ctx(cmd);
 }
@@ -225,8 +211,14 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
 
        rcu_read_lock();
        flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
-       rcu_read_unlock();
+       if (unlikely(!flow)) {
+               rcu_read_unlock();
+               WARN_ONCE(1, "Received NULL pointer for handle\n");
+               kfree(buf);
+               return -EINVAL;
+       }
        mlx5_fpga_tls_flow_to_cmd(flow, cmd);
+       rcu_read_unlock();
 
        MLX5_SET(tls_cmd, cmd, swid, ntohl(handle));
        MLX5_SET64(tls_cmd, cmd, tls_rcd_sn, be64_to_cpu(rcd_sn));
@@ -238,6 +230,8 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
        buf->complete = mlx_tls_kfree_complete;
 
        ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf);
+       if (ret < 0)
+               kfree(buf);
 
        return ret;
 }
@@ -245,7 +239,7 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
 static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
                                            void *flow, u32 swid, gfp_t flags)
 {
-       struct mlx5_teardown_stream_context *ctx;
+       struct mlx5_fpga_tls_command_context *ctx;
        struct mlx5_fpga_dma_buf *buf;
        void *cmd;
 
@@ -253,7 +247,7 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
        if (!ctx)
                return;
 
-       buf = &ctx->cmd.buf;
+       buf = &ctx->buf;
        cmd = (ctx + 1);
        MLX5_SET(tls_cmd, cmd, command_type, CMD_TEARDOWN_STREAM);
        MLX5_SET(tls_cmd, cmd, swid, swid);
@@ -264,8 +258,7 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
        buf->sg[0].data = cmd;
        buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;
 
-       ctx->swid = swid;
-       mlx5_fpga_tls_cmd_send(mdev->fpga, &ctx->cmd,
+       mlx5_fpga_tls_cmd_send(mdev->fpga, ctx,
                               mlx5_fpga_tls_teardown_completion);
 }
 
@@ -275,13 +268,14 @@ void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
        struct mlx5_fpga_tls *tls = mdev->fpga->tls;
        void *flow;
 
-       rcu_read_lock();
        if (direction_sx)
-               flow = idr_find(&tls->tx_idr, swid);
+               flow = mlx5_fpga_tls_release_swid(&tls->tx_idr,
+                                                 &tls->tx_idr_spinlock,
+                                                 swid);
        else
-               flow = idr_find(&tls->rx_idr, swid);
-
-       rcu_read_unlock();
+               flow = mlx5_fpga_tls_release_swid(&tls->rx_idr,
+                                                 &tls->rx_idr_spinlock,
+                                                 swid);
 
        if (!flow) {
                mlx5_fpga_err(mdev->fpga, "No flow information for swid %u\n",
@@ -289,6 +283,7 @@ void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
                return;
        }
 
+       synchronize_rcu(); /* before kfree(flow) */
        mlx5_fpga_tls_send_teardown_cmd(mdev, flow, swid, flags);
 }
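
The swid release above is now the classic RCU removal sequence: unpublish via idr_remove() under the spinlock, synchronize_rcu() so concurrent idr_find() readers (such as the resync path earlier in this file) drain, and only then free. A condensed sketch under hypothetical demo_* names:

    #include <linux/idr.h>
    #include <linux/printk.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    static DEFINE_IDR(demo_idr);
    static DEFINE_SPINLOCK(demo_idr_lock);

    struct demo_flow { int dummy; };

    /* Reader: the pointer is only valid inside the RCU read-side section. */
    static void demo_reader(unsigned long id)
    {
            struct demo_flow *flow;

            rcu_read_lock();
            flow = idr_find(&demo_idr, id);
            if (flow)
                    pr_info("demo: flow %p still published\n", flow);
            rcu_read_unlock();
    }

    /* Writer: unpublish first, wait for readers, then free. */
    static void demo_delete(unsigned long id)
    {
            struct demo_flow *flow;
            unsigned long flags;

            spin_lock_irqsave(&demo_idr_lock, flags);
            flow = idr_remove(&demo_idr, id);
            spin_unlock_irqrestore(&demo_idr_lock, flags);

            if (flow) {
                    synchronize_rcu();      /* drain idr_find() readers */
                    kfree(flow);
            }
    }
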
 
index 0be3eb86dd84e7abc0d7aac32d19b4d7643bc91d..78e073243f40bf0e452cd59111738858923551fa 100644 (file)
@@ -819,7 +819,7 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
        struct mlx5_flow_root_namespace *root = find_root(&prio->node);
        struct mlx5_ft_underlay_qp *uqp;
        int min_level = INT_MAX;
-       int err;
+       int err = 0;
        u32 qpn;
 
        if (root->root_ft)
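
The err = 0 initializer above guards against err being returned unassigned on paths where the update loop never runs (or at least silences the compiler's may-be-used-uninitialized warning about them). The general hazard, reduced to a sketch with hypothetical names:

    #include <linux/list.h>

    struct demo_item { struct list_head node; };

    static int demo_update_one(struct demo_item *it)
    {
            return 0;
    }

    static int demo_update_all(struct list_head *head)
    {
            struct demo_item *it;
            int err = 0;    /* without this, an empty list returns garbage */

            list_for_each_entry(it, head, node) {
                    err = demo_update_one(it);
                    if (err)
                            break;
            }
            return err;
    }
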
index cb9fa3430c5358678a6a7b719ed0d22e8d2eb15e..3b98fcdd7d0e4c9911668f857b5ec55b5737de8e 100644 (file)
@@ -152,11 +152,11 @@ static void health_recover(struct work_struct *work)
 
        nic_state = mlx5_get_nic_state(dev);
        if (nic_state == MLX5_NIC_IFC_INVALID) {
-               dev_err(&dev->pdev->dev, "health recovery flow aborted since the nic state is invalid\n");
+               mlx5_core_err(dev, "health recovery flow aborted since the nic state is invalid\n");
                return;
        }
 
-       dev_err(&dev->pdev->dev, "starting health recovery flow\n");
+       mlx5_core_err(dev, "starting health recovery flow\n");
        mlx5_recover_device(dev);
 }
 
@@ -180,8 +180,8 @@ static void health_care(struct work_struct *work)
        if (!test_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags))
                schedule_delayed_work(&health->recover_work, recover_delay);
        else
-               dev_err(&dev->pdev->dev,
-                       "new health works are not permitted at this stage\n");
+               mlx5_core_err(dev,
+                             "new health works are not permitted at this stage\n");
        spin_unlock_irqrestore(&health->wq_lock, flags);
 }
 
@@ -228,18 +228,22 @@ static void print_health_info(struct mlx5_core_dev *dev)
                return;
 
        for (i = 0; i < ARRAY_SIZE(h->assert_var); i++)
-               dev_err(&dev->pdev->dev, "assert_var[%d] 0x%08x\n", i, ioread32be(h->assert_var + i));
+               mlx5_core_err(dev, "assert_var[%d] 0x%08x\n", i,
+                             ioread32be(h->assert_var + i));
 
-       dev_err(&dev->pdev->dev, "assert_exit_ptr 0x%08x\n", ioread32be(&h->assert_exit_ptr));
-       dev_err(&dev->pdev->dev, "assert_callra 0x%08x\n", ioread32be(&h->assert_callra));
+       mlx5_core_err(dev, "assert_exit_ptr 0x%08x\n",
+                     ioread32be(&h->assert_exit_ptr));
+       mlx5_core_err(dev, "assert_callra 0x%08x\n",
+                     ioread32be(&h->assert_callra));
        sprintf(fw_str, "%d.%d.%d", fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));
-       dev_err(&dev->pdev->dev, "fw_ver %s\n", fw_str);
-       dev_err(&dev->pdev->dev, "hw_id 0x%08x\n", ioread32be(&h->hw_id));
-       dev_err(&dev->pdev->dev, "irisc_index %d\n", ioread8(&h->irisc_index));
-       dev_err(&dev->pdev->dev, "synd 0x%x: %s\n", ioread8(&h->synd), hsynd_str(ioread8(&h->synd)));
-       dev_err(&dev->pdev->dev, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd));
+       mlx5_core_err(dev, "fw_ver %s\n", fw_str);
+       mlx5_core_err(dev, "hw_id 0x%08x\n", ioread32be(&h->hw_id));
+       mlx5_core_err(dev, "irisc_index %d\n", ioread8(&h->irisc_index));
+       mlx5_core_err(dev, "synd 0x%x: %s\n", ioread8(&h->synd),
+                     hsynd_str(ioread8(&h->synd)));
+       mlx5_core_err(dev, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd));
        fw = ioread32be(&h->fw_ver);
-       dev_err(&dev->pdev->dev, "raw fw_ver 0x%08x\n", fw);
+       mlx5_core_err(dev, "raw fw_ver 0x%08x\n", fw);
 }
 
 static unsigned long get_next_poll_jiffies(void)
@@ -262,8 +266,7 @@ void mlx5_trigger_health_work(struct mlx5_core_dev *dev)
        if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
                queue_work(health->wq, &health->work);
        else
-               dev_err(&dev->pdev->dev,
-                       "new health works are not permitted at this stage\n");
+               mlx5_core_err(dev, "new health works are not permitted at this stage\n");
        spin_unlock_irqrestore(&health->wq_lock, flags);
 }
 
@@ -284,7 +287,7 @@ static void poll_health(struct timer_list *t)
 
        health->prev = count;
        if (health->miss_counter == MAX_MISSES) {
-               dev_err(&dev->pdev->dev, "device's health compromised - reached miss count\n");
+               mlx5_core_err(dev, "device's health compromised - reached miss count\n");
                print_health_info(dev);
        }
 
@@ -352,6 +355,13 @@ void mlx5_drain_health_recovery(struct mlx5_core_dev *dev)
        cancel_delayed_work_sync(&dev->priv.health.recover_work);
 }
 
+void mlx5_health_flush(struct mlx5_core_dev *dev)
+{
+       struct mlx5_core_health *health = &dev->priv.health;
+
+       flush_workqueue(health->wq);
+}
+
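
mlx5_health_flush() drains queued health work without destroying the workqueue, which matters in remove_one() further down when mlx5_unload_one() fails and the device object must stay alive. The workqueue-API distinction it leans on, sketched:

    #include <linux/workqueue.h>

    static void demo_quiesce(struct workqueue_struct *wq)
    {
            flush_workqueue(wq);    /* wait for queued work; wq stays usable */
    }

    static void demo_teardown(struct workqueue_struct *wq)
    {
            destroy_workqueue(wq);  /* flushes, then frees; final use only */
    }
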
 void mlx5_health_cleanup(struct mlx5_core_dev *dev)
 {
        struct mlx5_core_health *health = &dev->priv.health;
@@ -370,7 +380,7 @@ int mlx5_health_init(struct mlx5_core_dev *dev)
                return -ENOMEM;
 
        strcpy(name, "mlx5_health");
-       strcat(name, dev_name(&dev->pdev->dev));
+       strcat(name, dev->priv.name);
        health->wq = create_singlethread_workqueue(name);
        kfree(name);
        if (!health->wq)
index 4eac42555c7dccececfb9805fb984beb520fbb7b..9b03ae1e1e10d1f7e0516c9bebccd76258117dcd 100644 (file)
@@ -77,15 +77,14 @@ int mlx5i_init(struct mlx5_core_dev *mdev,
               void *ppriv)
 {
        struct mlx5e_priv *priv  = mlx5i_epriv(netdev);
-       u16 max_mtu;
        int err;
 
        err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
        if (err)
                return err;
 
-       mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
-       netdev->mtu = max_mtu;
+       mlx5e_set_netdev_mtu_boundaries(priv);
+       netdev->mtu = netdev->max_mtu;
 
        mlx5e_build_nic_params(mdev, &priv->rss_params, &priv->channels.params,
                               mlx5e_get_netdev_max_channels(netdev),
index 70cc906a102b2dde87d161385126f43da4948266..5245b0b1770ffde0e9515d43d2ddc33d76219d7d 100644 (file)
@@ -164,26 +164,6 @@ static struct mlx5_profile profile[] = {
                        .size   = 8,
                        .limit  = 4
                },
-               .mr_cache[16]   = {
-                       .size   = 8,
-                       .limit  = 4
-               },
-               .mr_cache[17]   = {
-                       .size   = 8,
-                       .limit  = 4
-               },
-               .mr_cache[18]   = {
-                       .size   = 8,
-                       .limit  = 4
-               },
-               .mr_cache[19]   = {
-                       .size   = 4,
-                       .limit  = 2
-               },
-               .mr_cache[20]   = {
-                       .size   = 4,
-                       .limit  = 2
-               },
        },
 };
 
@@ -587,24 +567,23 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
 
 static int set_hca_cap(struct mlx5_core_dev *dev)
 {
-       struct pci_dev *pdev = dev->pdev;
        int err;
 
        err = handle_hca_cap(dev);
        if (err) {
-               dev_err(&pdev->dev, "handle_hca_cap failed\n");
+               mlx5_core_err(dev, "handle_hca_cap failed\n");
                goto out;
        }
 
        err = handle_hca_cap_atomic(dev);
        if (err) {
-               dev_err(&pdev->dev, "handle_hca_cap_atomic failed\n");
+               mlx5_core_err(dev, "handle_hca_cap_atomic failed\n");
                goto out;
        }
 
        err = handle_hca_cap_odp(dev);
        if (err) {
-               dev_err(&pdev->dev, "handle_hca_cap_odp failed\n");
+               mlx5_core_err(dev, "handle_hca_cap_odp failed\n");
                goto out;
        }
 
@@ -736,36 +715,29 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
        return -EOPNOTSUPP;
 }
 
-static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
+static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev,
+                        const struct pci_device_id *id)
 {
-       struct pci_dev *pdev = dev->pdev;
+       struct mlx5_priv *priv = &dev->priv;
        int err = 0;
 
-       pci_set_drvdata(dev->pdev, dev);
-       strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN);
-       priv->name[MLX5_MAX_NAME_LEN - 1] = 0;
-
-       mutex_init(&priv->pgdir_mutex);
-       INIT_LIST_HEAD(&priv->pgdir_list);
-       spin_lock_init(&priv->mkey_lock);
+       dev->pdev = pdev;
+       priv->pci_dev_data = id->driver_data;
 
-       mutex_init(&priv->alloc_mutex);
+       pci_set_drvdata(dev->pdev, dev);
 
+       dev->bar_addr = pci_resource_start(pdev, 0);
        priv->numa_node = dev_to_node(&dev->pdev->dev);
 
-       if (mlx5_debugfs_root)
-               priv->dbg_root =
-                       debugfs_create_dir(pci_name(pdev), mlx5_debugfs_root);
-
        err = mlx5_pci_enable_device(dev);
        if (err) {
-               dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
-               goto err_dbg;
+               mlx5_core_err(dev, "Cannot enable PCI device, aborting\n");
+               return err;
        }
 
        err = request_bar(pdev);
        if (err) {
-               dev_err(&pdev->dev, "error requesting BARs, aborting\n");
+               mlx5_core_err(dev, "error requesting BARs, aborting\n");
                goto err_disable;
        }
 
@@ -773,7 +745,7 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
 
        err = set_dma_caps(pdev);
        if (err) {
-               dev_err(&pdev->dev, "Failed setting DMA capabilities mask, aborting\n");
+               mlx5_core_err(dev, "Failed setting DMA capabilities mask, aborting\n");
                goto err_clr_master;
        }
 
@@ -782,11 +754,11 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
            pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP128))
                mlx5_core_dbg(dev, "Enabling pci atomics failed\n");
 
-       dev->iseg_base = pci_resource_start(dev->pdev, 0);
+       dev->iseg_base = dev->bar_addr;
        dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg));
        if (!dev->iseg) {
                err = -ENOMEM;
-               dev_err(&pdev->dev, "Failed mapping initialization segment, aborting\n");
+               mlx5_core_err(dev, "Failed mapping initialization segment, aborting\n");
                goto err_clr_master;
        }
 
@@ -797,52 +769,47 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
        release_bar(dev->pdev);
 err_disable:
        mlx5_pci_disable_device(dev);
-
-err_dbg:
-       debugfs_remove(priv->dbg_root);
        return err;
 }
 
-static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
+static void mlx5_pci_close(struct mlx5_core_dev *dev)
 {
        iounmap(dev->iseg);
        pci_clear_master(dev->pdev);
        release_bar(dev->pdev);
        mlx5_pci_disable_device(dev);
-       debugfs_remove_recursive(priv->dbg_root);
 }
 
-static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
+static int mlx5_init_once(struct mlx5_core_dev *dev)
 {
-       struct pci_dev *pdev = dev->pdev;
        int err;
 
-       priv->devcom = mlx5_devcom_register_device(dev);
-       if (IS_ERR(priv->devcom))
-               dev_err(&pdev->dev, "failed to register with devcom (0x%p)\n",
-                       priv->devcom);
+       dev->priv.devcom = mlx5_devcom_register_device(dev);
+       if (IS_ERR(dev->priv.devcom))
+               mlx5_core_err(dev, "failed to register with devcom (0x%p)\n",
+                             dev->priv.devcom);
 
        err = mlx5_query_board_id(dev);
        if (err) {
-               dev_err(&pdev->dev, "query board id failed\n");
+               mlx5_core_err(dev, "query board id failed\n");
                goto err_devcom;
        }
 
        err = mlx5_eq_table_init(dev);
        if (err) {
-               dev_err(&pdev->dev, "failed to initialize eq\n");
+               mlx5_core_err(dev, "failed to initialize eq\n");
                goto err_devcom;
        }
 
        err = mlx5_events_init(dev);
        if (err) {
-               dev_err(&pdev->dev, "failed to initialize events\n");
+               mlx5_core_err(dev, "failed to initialize events\n");
                goto err_eq_cleanup;
        }
 
        err = mlx5_cq_debugfs_init(dev);
        if (err) {
-               dev_err(&pdev->dev, "failed to initialize cq debugfs\n");
+               mlx5_core_err(dev, "failed to initialize cq debugfs\n");
                goto err_events_cleanup;
        }
 
@@ -858,31 +825,31 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
 
        err = mlx5_init_rl_table(dev);
        if (err) {
-               dev_err(&pdev->dev, "Failed to init rate limiting\n");
+               mlx5_core_err(dev, "Failed to init rate limiting\n");
                goto err_tables_cleanup;
        }
 
        err = mlx5_mpfs_init(dev);
        if (err) {
-               dev_err(&pdev->dev, "Failed to init l2 table %d\n", err);
+               mlx5_core_err(dev, "Failed to init l2 table %d\n", err);
                goto err_rl_cleanup;
        }
 
        err = mlx5_eswitch_init(dev);
        if (err) {
-               dev_err(&pdev->dev, "Failed to init eswitch %d\n", err);
+               mlx5_core_err(dev, "Failed to init eswitch %d\n", err);
                goto err_mpfs_cleanup;
        }
 
        err = mlx5_sriov_init(dev);
        if (err) {
-               dev_err(&pdev->dev, "Failed to init sriov %d\n", err);
+               mlx5_core_err(dev, "Failed to init sriov %d\n", err);
                goto err_eswitch_cleanup;
        }
 
        err = mlx5_fpga_init(dev);
        if (err) {
-               dev_err(&pdev->dev, "Failed to init fpga device %d\n", err);
+               mlx5_core_err(dev, "Failed to init fpga device %d\n", err);
                goto err_sriov_cleanup;
        }
 
@@ -932,93 +899,78 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
        mlx5_devcom_unregister_device(dev->priv.devcom);
 }
 
-static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
-                        bool boot)
+static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
 {
-       struct pci_dev *pdev = dev->pdev;
        int err;
 
-       dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev);
-       mutex_lock(&dev->intf_state_mutex);
-       if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
-               dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n",
-                        __func__);
-               goto out;
-       }
-
-       dev_info(&pdev->dev, "firmware version: %d.%d.%d\n", fw_rev_maj(dev),
-                fw_rev_min(dev), fw_rev_sub(dev));
+       mlx5_core_info(dev, "firmware version: %d.%d.%d\n", fw_rev_maj(dev),
+                      fw_rev_min(dev), fw_rev_sub(dev));
 
        /* Only PFs hold the relevant PCIe information for this query */
        if (mlx5_core_is_pf(dev))
                pcie_print_link_status(dev->pdev);
 
-       /* on load removing any previous indication of internal error, device is
-        * up
-        */
-       dev->state = MLX5_DEVICE_STATE_UP;
-
        /* wait for firmware to accept initialization segments configurations
         */
        err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI);
        if (err) {
-               dev_err(&dev->pdev->dev, "Firmware over %d MS in pre-initializing state, aborting\n",
-                       FW_PRE_INIT_TIMEOUT_MILI);
-               goto out_err;
+               mlx5_core_err(dev, "Firmware over %d MS in pre-initializing state, aborting\n",
+                             FW_PRE_INIT_TIMEOUT_MILI);
+               return err;
        }
 
        err = mlx5_cmd_init(dev);
        if (err) {
-               dev_err(&pdev->dev, "Failed initializing command interface, aborting\n");
-               goto out_err;
+               mlx5_core_err(dev, "Failed initializing command interface, aborting\n");
+               return err;
        }
 
        err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI);
        if (err) {
-               dev_err(&dev->pdev->dev, "Firmware over %d MS in initializing state, aborting\n",
-                       FW_INIT_TIMEOUT_MILI);
+               mlx5_core_err(dev, "Firmware over %d MS in initializing state, aborting\n",
+                             FW_INIT_TIMEOUT_MILI);
                goto err_cmd_cleanup;
        }
 
        err = mlx5_core_enable_hca(dev, 0);
        if (err) {
-               dev_err(&pdev->dev, "enable hca failed\n");
+               mlx5_core_err(dev, "enable hca failed\n");
                goto err_cmd_cleanup;
        }
 
        err = mlx5_core_set_issi(dev);
        if (err) {
-               dev_err(&pdev->dev, "failed to set issi\n");
+               mlx5_core_err(dev, "failed to set issi\n");
                goto err_disable_hca;
        }
 
        err = mlx5_satisfy_startup_pages(dev, 1);
        if (err) {
-               dev_err(&pdev->dev, "failed to allocate boot pages\n");
+               mlx5_core_err(dev, "failed to allocate boot pages\n");
                goto err_disable_hca;
        }
 
        err = set_hca_ctrl(dev);
        if (err) {
-               dev_err(&pdev->dev, "set_hca_ctrl failed\n");
+               mlx5_core_err(dev, "set_hca_ctrl failed\n");
                goto reclaim_boot_pages;
        }
 
        err = set_hca_cap(dev);
        if (err) {
-               dev_err(&pdev->dev, "set_hca_cap failed\n");
+               mlx5_core_err(dev, "set_hca_cap failed\n");
                goto reclaim_boot_pages;
        }
 
        err = mlx5_satisfy_startup_pages(dev, 0);
        if (err) {
-               dev_err(&pdev->dev, "failed to allocate init pages\n");
+               mlx5_core_err(dev, "failed to allocate init pages\n");
                goto reclaim_boot_pages;
        }
 
        err = mlx5_cmd_init_hca(dev, sw_owner_id);
        if (err) {
-               dev_err(&pdev->dev, "init hca failed\n");
+               mlx5_core_err(dev, "init hca failed\n");
                goto reclaim_boot_pages;
        }
 
@@ -1028,23 +980,50 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
 
        err = mlx5_query_hca_caps(dev);
        if (err) {
-               dev_err(&pdev->dev, "query hca failed\n");
-               goto err_stop_poll;
+               mlx5_core_err(dev, "query hca failed\n");
+               goto stop_health;
        }
 
-       if (boot) {
-               err = mlx5_init_once(dev, priv);
-               if (err) {
-                       dev_err(&pdev->dev, "sw objs init failed\n");
-                       goto err_stop_poll;
-               }
+       return 0;
+
+stop_health:
+       mlx5_stop_health_poll(dev, boot);
+reclaim_boot_pages:
+       mlx5_reclaim_startup_pages(dev);
+err_disable_hca:
+       mlx5_core_disable_hca(dev, 0);
+err_cmd_cleanup:
+       mlx5_cmd_cleanup(dev);
+
+       return err;
+}
+
+static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
+{
+       int err;
+
+       mlx5_stop_health_poll(dev, boot);
+       err = mlx5_cmd_teardown_hca(dev);
+       if (err) {
+               mlx5_core_err(dev, "tear_down_hca failed, skip cleanup\n");
+               return err;
        }
+       mlx5_reclaim_startup_pages(dev);
+       mlx5_core_disable_hca(dev, 0);
+       mlx5_cmd_cleanup(dev);
+
+       return 0;
+}
+
+static int mlx5_load(struct mlx5_core_dev *dev)
+{
+       int err;
 
        dev->priv.uar = mlx5_get_uars_page(dev);
        if (IS_ERR(dev->priv.uar)) {
-               dev_err(&pdev->dev, "Failed allocating uar, aborting\n");
+               mlx5_core_err(dev, "Failed allocating uar, aborting\n");
                err = PTR_ERR(dev->priv.uar);
-               goto err_get_uars;
+               return err;
        }
 
        mlx5_events_start(dev);
@@ -1052,132 +1031,155 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
 
        err = mlx5_eq_table_create(dev);
        if (err) {
-               dev_err(&pdev->dev, "Failed to create EQs\n");
+               mlx5_core_err(dev, "Failed to create EQs\n");
                goto err_eq_table;
        }
 
        err = mlx5_fw_tracer_init(dev->tracer);
        if (err) {
-               dev_err(&pdev->dev, "Failed to init FW tracer\n");
+               mlx5_core_err(dev, "Failed to init FW tracer\n");
                goto err_fw_tracer;
        }
 
        err = mlx5_fpga_device_start(dev);
        if (err) {
-               dev_err(&pdev->dev, "fpga device start failed %d\n", err);
+               mlx5_core_err(dev, "fpga device start failed %d\n", err);
                goto err_fpga_start;
        }
 
        err = mlx5_accel_ipsec_init(dev);
        if (err) {
-               dev_err(&pdev->dev, "IPSec device start failed %d\n", err);
+               mlx5_core_err(dev, "IPSec device start failed %d\n", err);
                goto err_ipsec_start;
        }
 
        err = mlx5_accel_tls_init(dev);
        if (err) {
-               dev_err(&pdev->dev, "TLS device start failed %d\n", err);
+               mlx5_core_err(dev, "TLS device start failed %d\n", err);
                goto err_tls_start;
        }
 
        err = mlx5_init_fs(dev);
        if (err) {
-               dev_err(&pdev->dev, "Failed to init flow steering\n");
+               mlx5_core_err(dev, "Failed to init flow steering\n");
                goto err_fs;
        }
 
        err = mlx5_core_set_hca_defaults(dev);
        if (err) {
-               dev_err(&pdev->dev, "Failed to set hca defaults\n");
+               mlx5_core_err(dev, "Failed to set hca defaults\n");
                goto err_fs;
        }
 
        err = mlx5_sriov_attach(dev);
        if (err) {
-               dev_err(&pdev->dev, "sriov init failed %d\n", err);
+               mlx5_core_err(dev, "sriov init failed %d\n", err);
                goto err_sriov;
        }
 
        err = mlx5_ec_init(dev);
        if (err) {
-               dev_err(&pdev->dev, "Failed to init embedded CPU\n");
+               mlx5_core_err(dev, "Failed to init embedded CPU\n");
                goto err_ec;
        }
 
-       if (mlx5_device_registered(dev)) {
-               mlx5_attach_device(dev);
-       } else {
-               err = mlx5_register_device(dev);
-               if (err) {
-                       dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err);
-                       goto err_reg_dev;
-               }
-       }
-
-       set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
-out:
-       mutex_unlock(&dev->intf_state_mutex);
-
        return 0;
 
-err_reg_dev:
-       mlx5_ec_cleanup(dev);
-
 err_ec:
        mlx5_sriov_detach(dev);
-
 err_sriov:
        mlx5_cleanup_fs(dev);
-
 err_fs:
        mlx5_accel_tls_cleanup(dev);
-
 err_tls_start:
        mlx5_accel_ipsec_cleanup(dev);
-
 err_ipsec_start:
        mlx5_fpga_device_stop(dev);
-
 err_fpga_start:
        mlx5_fw_tracer_cleanup(dev->tracer);
-
 err_fw_tracer:
        mlx5_eq_table_destroy(dev);
-
 err_eq_table:
        mlx5_pagealloc_stop(dev);
        mlx5_events_stop(dev);
-       mlx5_put_uars_page(dev, priv->uar);
+       mlx5_put_uars_page(dev, dev->priv.uar);
+       return err;
+}
 
-err_get_uars:
-       if (boot)
-               mlx5_cleanup_once(dev);
+static void mlx5_unload(struct mlx5_core_dev *dev)
+{
+       mlx5_ec_cleanup(dev);
+       mlx5_sriov_detach(dev);
+       mlx5_cleanup_fs(dev);
+       mlx5_accel_ipsec_cleanup(dev);
+       mlx5_accel_tls_cleanup(dev);
+       mlx5_fpga_device_stop(dev);
+       mlx5_fw_tracer_cleanup(dev->tracer);
+       mlx5_eq_table_destroy(dev);
+       mlx5_pagealloc_stop(dev);
+       mlx5_events_stop(dev);
+       mlx5_put_uars_page(dev, dev->priv.uar);
+}
 
-err_stop_poll:
-       mlx5_stop_health_poll(dev, boot);
-       if (mlx5_cmd_teardown_hca(dev)) {
-               dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
-               goto out_err;
+static int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
+{
+       int err = 0;
+
+       dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev);
+       mutex_lock(&dev->intf_state_mutex);
+       if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
+               mlx5_core_warn(dev, "interface is up, NOP\n");
+               goto out;
        }
+       /* remove any previous indication of internal error */
+       dev->state = MLX5_DEVICE_STATE_UP;
 
-reclaim_boot_pages:
-       mlx5_reclaim_startup_pages(dev);
+       err = mlx5_function_setup(dev, boot);
+       if (err)
+               goto out;
 
-err_disable_hca:
-       mlx5_core_disable_hca(dev, 0);
+       if (boot) {
+               err = mlx5_init_once(dev);
+               if (err) {
+                       mlx5_core_err(dev, "sw objs init failed\n");
+                       goto function_teardown;
+               }
+       }
 
-err_cmd_cleanup:
-       mlx5_cmd_cleanup(dev);
+       err = mlx5_load(dev);
+       if (err)
+               goto err_load;
+
+       if (mlx5_device_registered(dev)) {
+               mlx5_attach_device(dev);
+       } else {
+               err = mlx5_register_device(dev);
+               if (err) {
+                       mlx5_core_err(dev, "register device failed %d\n", err);
+                       goto err_reg_dev;
+               }
+       }
+
+       set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
+out:
+       mutex_unlock(&dev->intf_state_mutex);
+
+       return err;
 
-out_err:
+err_reg_dev:
+       mlx5_unload(dev);
+err_load:
+       if (boot)
+               mlx5_cleanup_once(dev);
+function_teardown:
+       mlx5_function_teardown(dev, boot);
        dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
        mutex_unlock(&dev->intf_state_mutex);
 
        return err;
 }
 
-static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
-                          bool cleanup)
+static int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup)
 {
        int err = 0;
 
@@ -1186,8 +1188,8 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
 
        mutex_lock(&dev->intf_state_mutex);
        if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
-               dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n",
-                        __func__);
+               mlx5_core_warn(dev, "%s: interface is down, NOP\n",
+                              __func__);
                if (cleanup)
                        mlx5_cleanup_once(dev);
                goto out;
@@ -1198,30 +1200,12 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
        if (mlx5_device_registered(dev))
                mlx5_detach_device(dev);
 
-       mlx5_ec_cleanup(dev);
-       mlx5_sriov_detach(dev);
-       mlx5_cleanup_fs(dev);
-       mlx5_accel_ipsec_cleanup(dev);
-       mlx5_accel_tls_cleanup(dev);
-       mlx5_fpga_device_stop(dev);
-       mlx5_fw_tracer_cleanup(dev->tracer);
-       mlx5_eq_table_destroy(dev);
-       mlx5_pagealloc_stop(dev);
-       mlx5_events_stop(dev);
-       mlx5_put_uars_page(dev, priv->uar);
+       mlx5_unload(dev);
+
        if (cleanup)
                mlx5_cleanup_once(dev);
-       mlx5_stop_health_poll(dev, cleanup);
-
-       err = mlx5_cmd_teardown_hca(dev);
-       if (err) {
-               dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
-               goto out;
-       }
-       mlx5_reclaim_startup_pages(dev);
-       mlx5_core_disable_hca(dev, 0);
-       mlx5_cmd_cleanup(dev);
 
+       mlx5_function_teardown(dev, cleanup);
 out:
        mutex_unlock(&dev->intf_state_mutex);
        return err;
@@ -1238,29 +1222,15 @@ static const struct devlink_ops mlx5_devlink_ops = {
 #endif
 };
 
-#define MLX5_IB_MOD "mlx5_ib"
-static int init_one(struct pci_dev *pdev,
-                   const struct pci_device_id *id)
+static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx, const char *name)
 {
-       struct mlx5_core_dev *dev;
-       struct devlink *devlink;
-       struct mlx5_priv *priv;
+       struct mlx5_priv *priv = &dev->priv;
        int err;
 
-       devlink = devlink_alloc(&mlx5_devlink_ops, sizeof(*dev));
-       if (!devlink) {
-               dev_err(&pdev->dev, "kzalloc failed\n");
-               return -ENOMEM;
-       }
-
-       dev = devlink_priv(devlink);
-       priv = &dev->priv;
-       priv->pci_dev_data = id->driver_data;
-
-       pci_set_drvdata(pdev, dev);
+       strncpy(priv->name, name, MLX5_MAX_NAME_LEN);
+       priv->name[MLX5_MAX_NAME_LEN - 1] = 0;
 
-       dev->pdev = pdev;
-       dev->profile = &profile[prof_sel];
+       dev->profile = &profile[profile_idx];
 
        INIT_LIST_HEAD(&priv->ctx_list);
        spin_lock_init(&priv->ctx_lock);
@@ -1272,25 +1242,72 @@ static int init_one(struct pci_dev *pdev,
        INIT_LIST_HEAD(&priv->bfregs.reg_head.list);
        INIT_LIST_HEAD(&priv->bfregs.wc_head.list);
 
-       err = mlx5_pci_init(dev, priv);
-       if (err) {
-               dev_err(&pdev->dev, "mlx5_pci_init failed with error code %d\n", err);
-               goto clean_dev;
+       mutex_init(&priv->alloc_mutex);
+       mutex_init(&priv->pgdir_mutex);
+       INIT_LIST_HEAD(&priv->pgdir_list);
+       spin_lock_init(&priv->mkey_lock);
+
+       priv->dbg_root = debugfs_create_dir(name, mlx5_debugfs_root);
+       if (!priv->dbg_root) {
+               pr_err("mlx5_core: %s error, cannot create debugfs dir, aborting\n", name);
+               return -ENOMEM;
        }
 
        err = mlx5_health_init(dev);
-       if (err) {
-               dev_err(&pdev->dev, "mlx5_health_init failed with error code %d\n", err);
-               goto close_pci;
-       }
+       if (err)
+               goto err_health_init;
 
        err = mlx5_pagealloc_init(dev);
        if (err)
                goto err_pagealloc_init;
 
-       err = mlx5_load_one(dev, priv, true);
+       return 0;
+
+err_pagealloc_init:
+       mlx5_health_cleanup(dev);
+err_health_init:
+       debugfs_remove(dev->priv.dbg_root);
+
+       return err;
+}
+
+static void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
+{
+       mlx5_pagealloc_cleanup(dev);
+       mlx5_health_cleanup(dev);
+       debugfs_remove_recursive(dev->priv.dbg_root);
+}
+
+#define MLX5_IB_MOD "mlx5_ib"
+static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+       struct mlx5_core_dev *dev;
+       struct devlink *devlink;
+       int err;
+
+       devlink = devlink_alloc(&mlx5_devlink_ops, sizeof(*dev));
+       if (!devlink) {
+               dev_err(&pdev->dev, "kzalloc failed\n");
+               return -ENOMEM;
+       }
+
+       dev = devlink_priv(devlink);
+
+       err = mlx5_mdev_init(dev, prof_sel, dev_name(&pdev->dev));
+       if (err)
+               goto mdev_init_err;
+
+       err = mlx5_pci_init(dev, pdev, id);
+       if (err) {
+               mlx5_core_err(dev, "mlx5_pci_init failed with error code %d\n",
+                             err);
+               goto pci_init_err;
+       }
+
+       err = mlx5_load_one(dev, true);
        if (err) {
-               dev_err(&pdev->dev, "mlx5_load_one failed with error code %d\n", err);
+               mlx5_core_err(dev, "mlx5_load_one failed with error code %d\n",
+                             err);
                goto err_load_one;
        }
 
@@ -1304,14 +1321,13 @@ static int init_one(struct pci_dev *pdev,
        return 0;
 
 clean_load:
-       mlx5_unload_one(dev, priv, true);
+       mlx5_unload_one(dev, true);
+
 err_load_one:
-       mlx5_pagealloc_cleanup(dev);
-err_pagealloc_init:
-       mlx5_health_cleanup(dev);
-close_pci:
-       mlx5_pci_close(dev, priv);
-clean_dev:
+       mlx5_pci_close(dev);
+pci_init_err:
+       mlx5_mdev_uninit(dev);
+mdev_init_err:
        devlink_free(devlink);
 
        return err;
@@ -1321,20 +1337,18 @@ static void remove_one(struct pci_dev *pdev)
 {
        struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
        struct devlink *devlink = priv_to_devlink(dev);
-       struct mlx5_priv *priv = &dev->priv;
 
        devlink_unregister(devlink);
        mlx5_unregister_device(dev);
 
-       if (mlx5_unload_one(dev, priv, true)) {
-               dev_err(&dev->pdev->dev, "mlx5_unload_one failed\n");
-               mlx5_health_cleanup(dev);
+       if (mlx5_unload_one(dev, true)) {
+               mlx5_core_err(dev, "mlx5_unload_one failed\n");
+               mlx5_health_flush(dev);
                return;
        }
 
-       mlx5_pagealloc_cleanup(dev);
-       mlx5_health_cleanup(dev);
-       mlx5_pci_close(dev, priv);
+       mlx5_pci_close(dev);
+       mlx5_mdev_uninit(dev);
        devlink_free(devlink);
 }
 
@@ -1342,12 +1356,11 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
                                              pci_channel_state_t state)
 {
        struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
-       struct mlx5_priv *priv = &dev->priv;
 
-       dev_info(&pdev->dev, "%s was called\n", __func__);
+       mlx5_core_info(dev, "%s was called\n", __func__);
 
        mlx5_enter_error_state(dev, false);
-       mlx5_unload_one(dev, priv, false);
+       mlx5_unload_one(dev, false);
        /* In case of kernel call drain the health wq */
        if (state) {
                mlx5_drain_health_wq(dev);
@@ -1374,7 +1387,9 @@ static int wait_vital(struct pci_dev *pdev)
                count = ioread32be(health->health_counter);
                if (count && count != 0xffffffff) {
                        if (last_count && last_count != count) {
-                               dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i);
+                               mlx5_core_info(dev,
+                                              "wait vital counter value 0x%x after %d iterations\n",
+                                              count, i);
                                return 0;
                        }
                        last_count = count;
@@ -1390,12 +1405,12 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
        struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
        int err;
 
-       dev_info(&pdev->dev, "%s was called\n", __func__);
+       mlx5_core_info(dev, "%s was called\n", __func__);
 
        err = mlx5_pci_enable_device(dev);
        if (err) {
-               dev_err(&pdev->dev, "%s: mlx5_pci_enable_device failed with error code: %d\n"
-                       , __func__, err);
+               mlx5_core_err(dev, "%s: mlx5_pci_enable_device failed with error code: %d\n",
+                             __func__, err);
                return PCI_ERS_RESULT_DISCONNECT;
        }
 
@@ -1404,7 +1419,7 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
        pci_save_state(pdev);
 
        if (wait_vital(pdev)) {
-               dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__);
+               mlx5_core_err(dev, "%s: wait_vital timed out\n", __func__);
                return PCI_ERS_RESULT_DISCONNECT;
        }
 
@@ -1414,17 +1429,16 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
 static void mlx5_pci_resume(struct pci_dev *pdev)
 {
        struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
-       struct mlx5_priv *priv = &dev->priv;
        int err;
 
-       dev_info(&pdev->dev, "%s was called\n", __func__);
+       mlx5_core_info(dev, "%s was called\n", __func__);
 
-       err = mlx5_load_one(dev, priv, false);
+       err = mlx5_load_one(dev, false);
        if (err)
-               dev_err(&pdev->dev, "%s: mlx5_load_one failed with error code: %d\n"
-                       , __func__, err);
+               mlx5_core_err(dev, "%s: mlx5_load_one failed with error code: %d\n",
+                             __func__, err);
        else
-               dev_info(&pdev->dev, "%s: device recovered\n", __func__);
+               mlx5_core_info(dev, "%s: device recovered\n", __func__);
 }
 
 static const struct pci_error_handlers mlx5_err_handler = {
@@ -1486,13 +1500,12 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
 static void shutdown(struct pci_dev *pdev)
 {
        struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
-       struct mlx5_priv *priv = &dev->priv;
        int err;
 
-       dev_info(&pdev->dev, "Shutdown was called\n");
+       mlx5_core_info(dev, "Shutdown was called\n");
        err = mlx5_try_fast_unload(dev);
        if (err)
-               mlx5_unload_one(dev, priv, false);
+               mlx5_unload_one(dev, false);
        mlx5_pci_disable_device(dev);
 }
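
With mlx5_function_setup()/mlx5_load() split out and each paired with a mirror-image teardown, mlx5_load_one() keeps the standard goto-ladder unwind. The overall shape, sketched with hypothetical demo_* stages:

    struct demo_dev;

    int demo_function_setup(struct demo_dev *dev);
    void demo_function_teardown(struct demo_dev *dev);
    int demo_load(struct demo_dev *dev);
    void demo_unload(struct demo_dev *dev);
    int demo_register(struct demo_dev *dev);

    static int demo_probe(struct demo_dev *dev)
    {
            int err;

            err = demo_function_setup(dev);         /* talk to firmware */
            if (err)
                    return err;

            err = demo_load(dev);                   /* EQs, steering, ... */
            if (err)
                    goto err_load;

            err = demo_register(dev);               /* expose to upper layers */
            if (err)
                    goto err_reg;

            return 0;

    err_reg:
            demo_unload(dev);                       /* mirror of demo_load() */
    err_load:
            demo_function_teardown(dev);            /* mirror of setup */
            return err;
    }
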
 
index 6fb99be6058471a4f35c26fa50fc7b4f111ff723..8213c994e205fdba527590201e879f1e5395630a 100644 (file)
 extern uint mlx5_core_debug_mask;
 
 #define mlx5_core_dbg(__dev, format, ...)                              \
-       dev_dbg(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format,         \
+       pr_debug("%s:%s:%d:(pid %d): " format, (__dev)->priv.name,      \
                 __func__, __LINE__, current->pid,                      \
                 ##__VA_ARGS__)
 
 #define mlx5_core_dbg_once(__dev, format, ...)                         \
-       dev_dbg_once(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format,    \
+       pr_debug_once("%s:%s:%d:(pid %d): " format, (__dev)->priv.name, \
                     __func__, __LINE__, current->pid,                  \
                     ##__VA_ARGS__)
 
@@ -64,28 +64,37 @@ do {                                                                        \
 } while (0)
 
 #define mlx5_core_err(__dev, format, ...)                              \
-       dev_err(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \
+       pr_err("%s:%s:%d:(pid %d): " format, (__dev)->priv.name,        \
                __func__, __LINE__, current->pid,       \
               ##__VA_ARGS__)
 
-#define mlx5_core_err_rl(__dev, format, ...)                           \
-       dev_err_ratelimited(&(__dev)->pdev->dev,                        \
-                          "%s:%d:(pid %d): " format,                   \
-                          __func__, __LINE__, current->pid,            \
+#define mlx5_core_err_rl(__dev, format, ...)                                \
+       pr_err_ratelimited("%s:%s:%d:(pid %d): " format, (__dev)->priv.name, \
+                          __func__, __LINE__, current->pid,                 \
                           ##__VA_ARGS__)
 
 #define mlx5_core_warn(__dev, format, ...)                             \
-       dev_warn(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format,        \
+       pr_warn("%s:%s:%d:(pid %d): " format, (__dev)->priv.name,       \
                 __func__, __LINE__, current->pid,                      \
                ##__VA_ARGS__)
 
 #define mlx5_core_warn_once(__dev, format, ...)                                \
-       dev_warn_once(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format,   \
+       pr_warn_once("%s:%s:%d:(pid %d): " format, (__dev)->priv.name,  \
                      __func__, __LINE__, current->pid,                 \
                      ##__VA_ARGS__)
 
+#define mlx5_core_warn_rl(__dev, format, ...)                                \
+       pr_warn_ratelimited("%s:%s:%d:(pid %d): " format, (__dev)->priv.name, \
+                          __func__, __LINE__, current->pid,                  \
+                          ##__VA_ARGS__)
+
 #define mlx5_core_info(__dev, format, ...)                             \
-       dev_info(&(__dev)->pdev->dev, format, ##__VA_ARGS__)
+       pr_info("%s " format, (__dev)->priv.name, ##__VA_ARGS__)
+
+#define mlx5_core_info_rl(__dev, format, ...)                                \
+       pr_info_ratelimited("%s:%s:%d:(pid %d): " format, (__dev)->priv.name, \
+                          __func__, __LINE__, current->pid,                  \
+                          ##__VA_ARGS__)
 
 enum {
        MLX5_CMD_DATA, /* print command payload only */
index 94464723ff77de3c112fde55e61490675572c7e1..0d006224d7b057ecfdb9115f0835259ca1165494 100644 (file)
@@ -79,7 +79,7 @@ static u64 uar2pfn(struct mlx5_core_dev *mdev, u32 index)
        else
                system_page_index = index;
 
-       return (pci_resource_start(mdev->pdev, 0) >> PAGE_SHIFT) + system_page_index;
+       return (mdev->bar_addr >> PAGE_SHIFT) + system_page_index;
 }
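
uar2pfn() now reads the cached dev->bar_addr, but the arithmetic is unchanged: the UAR PFN is the BAR's starting PFN plus the page index. With 4 KiB pages (PAGE_SHIFT = 12), bar_addr = 0xf0000000 and index = 3 give (0xf0000000 >> 12) + 3 = 0xf0003. As a sketch:

    #include <linux/mm.h>
    #include <linux/types.h>

    static u64 demo_uar2pfn(u64 bar_addr, u32 index)
    {
            return (bar_addr >> PAGE_SHIFT) + index;  /* BAR PFN + page index */
    }
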
 
 static void up_rel_func(struct kref *kref)
index e55b4aa91e3bcc0a40980ef18639e44c413fb757..9e8e3e92f3693507bc9a0a5e72a3b425c52c2e00 100644 (file)
@@ -568,7 +568,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
        if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
                return 0;
 
-       emad_wq = alloc_workqueue("mlxsw_core_emad", WQ_MEM_RECLAIM, 0);
+       emad_wq = alloc_workqueue("mlxsw_core_emad", 0, 0);
        if (!emad_wq)
                return -ENOMEM;
        mlxsw_core->emad_wq = emad_wq;
@@ -934,6 +934,46 @@ mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
                                                     pool_type, p_cur, p_max);
 }
 
+static int
+mlxsw_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
+                      struct netlink_ext_ack *extack)
+{
+       struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+       char fw_info_psid[MLXSW_REG_MGIR_FW_INFO_PSID_SIZE];
+       u32 hw_rev, fw_major, fw_minor, fw_sub_minor;
+       char mgir_pl[MLXSW_REG_MGIR_LEN];
+       char buf[32];
+       int err;
+
+       err = devlink_info_driver_name_put(req,
+                                          mlxsw_core->bus_info->device_kind);
+       if (err)
+               return err;
+
+       mlxsw_reg_mgir_pack(mgir_pl);
+       err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgir), mgir_pl);
+       if (err)
+               return err;
+       mlxsw_reg_mgir_unpack(mgir_pl, &hw_rev, fw_info_psid, &fw_major,
+                             &fw_minor, &fw_sub_minor);
+
+       sprintf(buf, "%X", hw_rev);
+       err = devlink_info_version_fixed_put(req, "hw.revision", buf);
+       if (err)
+               return err;
+
+       err = devlink_info_version_fixed_put(req, "fw.psid", fw_info_psid);
+       if (err)
+               return err;
+
+       sprintf(buf, "%d.%d.%d", fw_major, fw_minor, fw_sub_minor);
+       err = devlink_info_version_running_put(req, "fw.version", buf);
+       if (err)
+               return err;
+
+       return 0;
+}
+
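
One nit on mlxsw_devlink_info_get(): buf is 32 bytes, yet three u32 components printed as "%d.%d.%d" can in principle need 10 + 1 + 10 + 1 + 10 = 32 characters plus the NUL terminator. Real firmware versions are far smaller, but a defensive variant (a hypothetical helper, not the driver's code) would size for the worst case and use the unsigned conversion:

    #include <linux/kernel.h>
    #include <linux/types.h>

    static void demo_fw_version_str(char *buf, size_t len,
                                    u32 major, u32 minor, u32 sub_minor)
    {
            /* worst case: 3 * 10 digits + 2 dots + NUL = 33 bytes */
            snprintf(buf, len, "%u.%u.%u", major, minor, sub_minor);
    }
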
 static int mlxsw_devlink_core_bus_device_reload(struct devlink *devlink,
                                                struct netlink_ext_ack *extack)
 {
@@ -968,6 +1008,7 @@ static const struct devlink_ops mlxsw_devlink_ops = {
        .sb_occ_max_clear               = mlxsw_devlink_sb_occ_max_clear,
        .sb_occ_port_pool_get           = mlxsw_devlink_sb_occ_port_pool_get,
        .sb_occ_tc_port_bind_get        = mlxsw_devlink_sb_occ_tc_port_bind_get,
+       .info_get                       = mlxsw_devlink_info_get,
 };
 
 static int
@@ -1720,7 +1761,9 @@ EXPORT_SYMBOL(mlxsw_core_res_get);
 
 int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port,
                         u32 port_number, bool split,
-                        u32 split_port_subnumber)
+                        u32 split_port_subnumber,
+                        const unsigned char *switch_id,
+                        unsigned char switch_id_len)
 {
        struct devlink *devlink = priv_to_devlink(mlxsw_core);
        struct mlxsw_core_port *mlxsw_core_port =
@@ -1730,7 +1773,8 @@ int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port,
 
        mlxsw_core_port->local_port = local_port;
        devlink_port_attrs_set(devlink_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
-                              port_number, split, split_port_subnumber);
+                              port_number, split, split_port_subnumber,
+                              switch_id, switch_id_len);
        err = devlink_port_register(devlink, devlink_port, local_port);
        if (err)
                memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
@@ -1960,10 +2004,10 @@ static int __init mlxsw_core_module_init(void)
 {
        int err;
 
-       mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, WQ_MEM_RECLAIM, 0);
+       mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, 0, 0);
        if (!mlxsw_wq)
                return -ENOMEM;
-       mlxsw_owq = alloc_ordered_workqueue("%s_ordered", WQ_MEM_RECLAIM,
+       mlxsw_owq = alloc_ordered_workqueue("%s_ordered", 0,
                                            mlxsw_core_driver_name);
        if (!mlxsw_owq) {
                err = -ENOMEM;
index e8c424da534c0c4ee31124e3ac4ab0ec5a38deca..d51dfc3560b60c047d6e95e1fa0c486639ec0e25 100644 (file)
@@ -166,7 +166,9 @@ void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
 void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port);
 int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port,
                         u32 port_number, bool split,
-                        u32 split_port_subnumber);
+                        u32 split_port_subnumber,
+                        const unsigned char *switch_id,
+                        unsigned char switch_id_len);
 void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port);
 void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port,
                             void *port_driver_priv, struct net_device *dev);
index ec5f5a66b6073d778f80a07be25c9cbf69c100ba..cf2114273b72aae2446a233f2c945246857492f5 100644 (file)
@@ -51,18 +51,6 @@ static int mlxsw_m_port_dummy_open_stop(struct net_device *dev)
        return 0;
 }
 
-static int mlxsw_m_port_get_port_parent_id(struct net_device *dev,
-                                          struct netdev_phys_item_id *ppid)
-{
-       struct mlxsw_m_port *mlxsw_m_port = netdev_priv(dev);
-       struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m;
-
-       ppid->id_len = sizeof(mlxsw_m->base_mac);
-       memcpy(&ppid->id, &mlxsw_m->base_mac, ppid->id_len);
-
-       return 0;
-}
-
 static struct devlink_port *
 mlxsw_m_port_get_devlink_port(struct net_device *dev)
 {
@@ -76,7 +64,6 @@ mlxsw_m_port_get_devlink_port(struct net_device *dev)
 static const struct net_device_ops mlxsw_m_port_netdev_ops = {
        .ndo_open               = mlxsw_m_port_dummy_open_stop,
        .ndo_stop               = mlxsw_m_port_dummy_open_stop,
-       .ndo_get_port_parent_id = mlxsw_m_port_get_port_parent_id,
        .ndo_get_devlink_port   = mlxsw_m_port_get_devlink_port,
 };
 
@@ -151,7 +138,9 @@ mlxsw_m_port_create(struct mlxsw_m *mlxsw_m, u8 local_port, u8 module)
        int err;
 
        err = mlxsw_core_port_init(mlxsw_m->core, local_port,
-                                  module + 1, false, 0);
+                                  module + 1, false, 0,
+                                  mlxsw_m->base_mac,
+                                  sizeof(mlxsw_m->base_mac));
        if (err) {
                dev_err(mlxsw_m->bus_info->dev, "Port %d: Failed to init core port\n",
                        local_port);
index eb4c5e8964cd6210af966d5347cedf9fb64cc53d..e1ee7f4994dbf9014de35c958498e74b927a7c1d 100644 (file)
@@ -8534,6 +8534,60 @@ static inline void mlxsw_reg_mpar_pack(char *payload, u8 local_port,
        mlxsw_reg_mpar_pa_id_set(payload, pa_id);
 }
 
+/* MGIR - Management General Information Register
+ * ----------------------------------------------
+ * MGIR register allows software to query the hardware and firmware general
+ * information.
+ */
+#define MLXSW_REG_MGIR_ID 0x9020
+#define MLXSW_REG_MGIR_LEN 0x9C
+
+MLXSW_REG_DEFINE(mgir, MLXSW_REG_MGIR_ID, MLXSW_REG_MGIR_LEN);
+
+/* reg_mgir_hw_info_device_hw_revision
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mgir, hw_info_device_hw_revision, 0x0, 16, 16);
+
+#define MLXSW_REG_MGIR_FW_INFO_PSID_SIZE 16
+
+/* reg_mgir_fw_info_psid
+ * PSID (ASCII string).
+ * Access: RO
+ */
+MLXSW_ITEM_BUF(reg, mgir, fw_info_psid, 0x30, MLXSW_REG_MGIR_FW_INFO_PSID_SIZE);
+
+/* reg_mgir_fw_info_extended_major
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mgir, fw_info_extended_major, 0x44, 0, 32);
+
+/* reg_mgir_fw_info_extended_minor
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mgir, fw_info_extended_minor, 0x48, 0, 32);
+
+/* reg_mgir_fw_info_extended_sub_minor
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mgir, fw_info_extended_sub_minor, 0x4C, 0, 32);
+
+static inline void mlxsw_reg_mgir_pack(char *payload)
+{
+       MLXSW_REG_ZERO(mgir, payload);
+}
+
+static inline void
+mlxsw_reg_mgir_unpack(char *payload, u32 *hw_rev, char *fw_info_psid,
+                     u32 *fw_major, u32 *fw_minor, u32 *fw_sub_minor)
+{
+       *hw_rev = mlxsw_reg_mgir_hw_info_device_hw_revision_get(payload);
+       mlxsw_reg_mgir_fw_info_psid_memcpy_from(payload, fw_info_psid);
+       *fw_major = mlxsw_reg_mgir_fw_info_extended_major_get(payload);
+       *fw_minor = mlxsw_reg_mgir_fw_info_extended_minor_get(payload);
+       *fw_sub_minor = mlxsw_reg_mgir_fw_info_extended_sub_minor_get(payload);
+}
+
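
For readers new to the mlxsw register helpers: each MLXSW_ITEM32() line generates a typed accessor over the raw register payload. Glossing over the real helper macros, the hw_revision getter above (byte offset 0x0, lsb at bit 16, width 16) boils down to roughly:

    #include <asm/byteorder.h>
    #include <linux/types.h>

    static inline u32 demo_hw_revision_get(const char *payload)
    {
            /* 32-bit big-endian word at byte offset 0x0 ... */
            u32 word = be32_to_cpu(*(const __be32 *)(payload + 0x0));

            /* ... with a 16-bit field whose least significant bit is bit 16 */
            return (word >> 16) & 0xffff;
    }
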
 /* MRSR - Management Reset and Shutdown Register
  * ---------------------------------------------
  * MRSR register is used to reset or shutdown the switch or
@@ -9958,6 +10012,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
        MLXSW_REG(mcia),
        MLXSW_REG(mpat),
        MLXSW_REG(mpar),
+       MLXSW_REG(mgir),
        MLXSW_REG(mrsr),
        MLXSW_REG(mlcr),
        MLXSW_REG(mpsc),
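
For context, the MGIR pack/unpack pair above follows the usual mlxsw register idiom: zero the payload, issue a query, then unpack the individual fields. A minimal sketch of a consumer, assuming the existing mlxsw_reg_query() API; the wrapper function name and the printout are illustrative only:

	static int example_mgir_versions_get(struct mlxsw_core *mlxsw_core)
	{
		char psid[MLXSW_REG_MGIR_FW_INFO_PSID_SIZE + 1] = {};
		u32 hw_rev, fw_major, fw_minor, fw_sub_minor;
		char mgir_pl[MLXSW_REG_MGIR_LEN];
		int err;

		mlxsw_reg_mgir_pack(mgir_pl);
		err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgir), mgir_pl);
		if (err)
			return err;
		mlxsw_reg_mgir_unpack(mgir_pl, &hw_rev, psid, &fw_major,
				      &fw_minor, &fw_sub_minor);

		/* PSID is raw ASCII; the +1 above keeps it NUL-terminated.
		 * Values like these typically feed devlink info versions.
		 */
		pr_info("hw rev %u, fw %u.%u.%u, PSID %s\n",
			hw_rev, fw_major, fw_minor, fw_sub_minor, psid);
		return 0;
	}
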
index 8b9a6870dbc2552d4980a900f0affacfc08765b5..fc325f1213fb2f39a8ea2130e6701a8bbf668fc8 100644 (file)
@@ -1704,18 +1704,6 @@ static int mlxsw_sp_set_features(struct net_device *dev,
                                       mlxsw_sp_feature_hw_tc);
 }
 
-static int mlxsw_sp_port_get_port_parent_id(struct net_device *dev,
-                                           struct netdev_phys_item_id *ppid)
-{
-       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
-       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
-
-       ppid->id_len = sizeof(mlxsw_sp->base_mac);
-       memcpy(&ppid->id, &mlxsw_sp->base_mac, ppid->id_len);
-
-       return 0;
-}
-
 static struct devlink_port *
 mlxsw_sp_port_get_devlink_port(struct net_device *dev)
 {
@@ -1740,7 +1728,6 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
        .ndo_vlan_rx_add_vid    = mlxsw_sp_port_add_vid,
        .ndo_vlan_rx_kill_vid   = mlxsw_sp_port_kill_vid,
        .ndo_set_features       = mlxsw_sp_set_features,
-       .ndo_get_port_parent_id = mlxsw_sp_port_get_port_parent_id,
        .ndo_get_devlink_port   = mlxsw_sp_port_get_devlink_port,
 };
 
@@ -3392,7 +3379,9 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
        int err;
 
        err = mlxsw_core_port_init(mlxsw_sp->core, local_port,
-                                  module + 1, split, lane / width);
+                                  module + 1, split, lane / width,
+                                  mlxsw_sp->base_mac,
+                                  sizeof(mlxsw_sp->base_mac));
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
                        local_port);
index 8811f6513e36f7cddd13dc77580f6921065e105f..e993159e8e4cd7f8c16633ebcdc06a87a3a67be9 100644 (file)
@@ -216,7 +216,6 @@ struct mlxsw_sp_acl_tcam_vregion {
                struct mlxsw_sp_acl_tcam_rehash_ctx ctx;
        } rehash;
        struct mlxsw_sp *mlxsw_sp;
-       bool failed_rollback; /* Indicates failed rollback during migration */
        unsigned int ref_count;
 };
 
@@ -1256,11 +1255,8 @@ mlxsw_sp_acl_tcam_vchunk_migrate_start(struct mlxsw_sp *mlxsw_sp,
        struct mlxsw_sp_acl_tcam_chunk *new_chunk;
 
        new_chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk, region);
-       if (IS_ERR(new_chunk)) {
-               if (ctx->this_is_rollback)
-                       vchunk->vregion->failed_rollback = true;
+       if (IS_ERR(new_chunk))
                return PTR_ERR(new_chunk);
-       }
        vchunk->chunk2 = vchunk->chunk;
        vchunk->chunk = new_chunk;
        ctx->current_vchunk = vchunk;
@@ -1318,8 +1314,13 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
                err = mlxsw_sp_acl_tcam_ventry_migrate(mlxsw_sp, ventry,
                                                       vchunk->chunk, credits);
                if (err) {
-                       if (ctx->this_is_rollback)
+                       if (ctx->this_is_rollback) {
+                               /* Save the ventry which we ended with and try
+                                * to continue later on.
+                                */
+                               ctx->start_ventry = ventry;
                                return err;
+                       }
                        /* Swap the chunk and chunk2 pointers so the follow-up
                         * rollback call will see the original chunk pointer
                         * in vchunk->chunk.
@@ -1397,8 +1398,12 @@ mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
                ctx->this_is_rollback = true;
                err2 = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
                                                            ctx, credits);
-               if (err2)
-                       vregion->failed_rollback = true;
+               if (err2) {
+                       trace_mlxsw_sp_acl_tcam_vregion_rehash_rollback_failed(mlxsw_sp,
+                                                                              vregion);
+                       dev_err(mlxsw_sp->bus_info->dev, "Failed to rollback during vregion migration fail\n");
+                       /* Let the rollback be continued later on. */
+               }
        }
        mutex_unlock(&vregion->lock);
        trace_mlxsw_sp_acl_tcam_vregion_migrate_end(mlxsw_sp, vregion);
@@ -1423,8 +1428,6 @@ mlxsw_sp_acl_tcam_vregion_rehash_start(struct mlxsw_sp *mlxsw_sp,
        int err;
 
        trace_mlxsw_sp_acl_tcam_vregion_rehash(mlxsw_sp, vregion);
-       if (vregion->failed_rollback)
-               return -EBUSY;
 
        hints_priv = ops->region_rehash_hints_get(vregion->region->priv);
        if (IS_ERR(hints_priv))
@@ -1471,11 +1474,9 @@ mlxsw_sp_acl_tcam_vregion_rehash_end(struct mlxsw_sp *mlxsw_sp,
        struct mlxsw_sp_acl_tcam_region *unused_region = vregion->region2;
        const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
 
-       if (!vregion->failed_rollback) {
-               vregion->region2 = NULL;
-               mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, unused_region);
-               mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, unused_region);
-       }
+       vregion->region2 = NULL;
+       mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, unused_region);
+       mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, unused_region);
        ops->region_rehash_hints_put(ctx->hints_priv);
        ctx->hints_priv = NULL;
 }
@@ -1506,11 +1507,6 @@ mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
                                                ctx, credits);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");
-               if (vregion->failed_rollback) {
-                       trace_mlxsw_sp_acl_tcam_vregion_rehash_dis(mlxsw_sp,
-                                                                  vregion);
-                       dev_err(mlxsw_sp->bus_info->dev, "Failed to rollback during vregion migration fail\n");
-               }
        }
 
        if (*credits >= 0)
index 9a79b5e1159743a9b619407cd3b9b9af065c97ca..d633bef5f10512269547c00f718f552720dd29a3 100644 (file)
@@ -70,6 +70,7 @@ static const struct mlxsw_sp_sb_pool_des mlxsw_sp2_sb_pool_dess[] = {
        {MLXSW_REG_SBXX_DIR_EGRESS, 1},
        {MLXSW_REG_SBXX_DIR_EGRESS, 2},
        {MLXSW_REG_SBXX_DIR_EGRESS, 3},
+       {MLXSW_REG_SBXX_DIR_EGRESS, 15},
 };
 
 #define MLXSW_SP_SB_ING_TC_COUNT 8
@@ -428,6 +429,7 @@ static const struct mlxsw_sp_sb_pr mlxsw_sp2_sb_prs[] = {
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
+       MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI),
 };
 
 static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
@@ -517,14 +519,14 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_egress[] = {
        MLXSW_SP_SB_CM(0, 7, 4),
        MLXSW_SP_SB_CM(0, 7, 4),
        MLXSW_SP_SB_CM(0, 7, 4),
-       MLXSW_SP_SB_CM(0, 7, 4),
-       MLXSW_SP_SB_CM(0, 7, 4),
-       MLXSW_SP_SB_CM(0, 7, 4),
-       MLXSW_SP_SB_CM(0, 7, 4),
-       MLXSW_SP_SB_CM(0, 7, 4),
-       MLXSW_SP_SB_CM(0, 7, 4),
-       MLXSW_SP_SB_CM(0, 7, 4),
-       MLXSW_SP_SB_CM(0, 7, 4),
+       MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+       MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+       MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+       MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+       MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+       MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+       MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+       MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
        MLXSW_SP_SB_CM(1, 0xff, 4),
 };
 
@@ -671,6 +673,7 @@ static const struct mlxsw_sp_sb_pm mlxsw_sp2_sb_pms[] = {
        MLXSW_SP_SB_PM(0, 0),
        MLXSW_SP_SB_PM(0, 0),
        MLXSW_SP_SB_PM(0, 0),
+       MLXSW_SP_SB_PM(10000, 90000),
 };
 
 static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
index 0ba9daa05a52194553cf607d27824b89809ec6de..64498c9f55abfc42fcad714984d6b1bd51974f66 100644 (file)
@@ -2371,7 +2371,7 @@ static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
                        MLXSW_REG_RAUHT_OP_WRITE_DELETE;
 }
 
-static void
+static int
 mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
                                struct mlxsw_sp_neigh_entry *neigh_entry,
                                enum mlxsw_reg_rauht_op op)
@@ -2385,10 +2385,10 @@ mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
        if (neigh_entry->counter_valid)
                mlxsw_reg_rauht_pack_counter(rauht_pl,
                                             neigh_entry->counter_index);
-       mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
 }
 
-static void
+static int
 mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
                                struct mlxsw_sp_neigh_entry *neigh_entry,
                                enum mlxsw_reg_rauht_op op)
@@ -2402,7 +2402,7 @@ mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
        if (neigh_entry->counter_valid)
                mlxsw_reg_rauht_pack_counter(rauht_pl,
                                             neigh_entry->counter_index);
-       mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
 }
 
 bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
@@ -2424,20 +2424,33 @@ mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
                            struct mlxsw_sp_neigh_entry *neigh_entry,
                            bool adding)
 {
+       enum mlxsw_reg_rauht_op op = mlxsw_sp_rauht_op(adding);
+       int err;
+
        if (!adding && !neigh_entry->connected)
                return;
        neigh_entry->connected = adding;
        if (neigh_entry->key.n->tbl->family == AF_INET) {
-               mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
-                                               mlxsw_sp_rauht_op(adding));
+               err = mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
+                                                     op);
+               if (err)
+                       return;
        } else if (neigh_entry->key.n->tbl->family == AF_INET6) {
                if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
                        return;
-               mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
-                                               mlxsw_sp_rauht_op(adding));
+               err = mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
+                                                     op);
+               if (err)
+                       return;
        } else {
                WARN_ON_ONCE(1);
+               return;
        }
+
+       if (adding)
+               neigh_entry->key.n->flags |= NTF_OFFLOADED;
+       else
+               neigh_entry->key.n->flags &= ~NTF_OFFLOADED;
 }
 
 void
@@ -4915,7 +4928,7 @@ static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
 static bool mlxsw_sp_fib6_rt_can_mp(const struct fib6_info *rt)
 {
        /* RTF_CACHE routes are ignored */
-       return !(rt->fib6_flags & RTF_ADDRCONF) && rt->fib6_nh.fib_nh_has_gw;
+       return !(rt->fib6_flags & RTF_ADDRCONF) && rt->fib6_nh.fib_nh_gw_family;
 }
 
 static struct fib6_info *
@@ -5055,7 +5068,7 @@ static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
 static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
                                    const struct fib6_info *rt)
 {
-       return rt->fib6_nh.fib_nh_has_gw ||
+       return rt->fib6_nh.fib_nh_gw_family ||
               mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
 }
 
@@ -6092,6 +6105,14 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
                        NL_SET_ERR_MSG_MOD(info->extack, "FIB offload was aborted. Not configuring route");
                        return notifier_from_errno(-EINVAL);
                }
+               if (info->family == AF_INET) {
+                       struct fib_entry_notifier_info *fen_info = ptr;
+
+                       if (fen_info->fi->fib_nh_is_v6) {
+                               NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported");
+                               return notifier_from_errno(-EINVAL);
+                       }
+               }
                break;
        }
 
@@ -6783,7 +6804,7 @@ static int mlxsw_sp_router_port_check_rif_addr(struct mlxsw_sp *mlxsw_sp,
        /* A RIF is not created for macvlan netdevs. Their MAC is used to
         * populate the FDB
         */
-       if (netif_is_macvlan(dev))
+       if (netif_is_macvlan(dev) || netif_is_l3_master(dev))
                return 0;
 
        for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
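
Worth noting about the hunk above: mlxsw_reg_write() already returns a status, so turning the op4/op6 helpers into int functions lets the caller skip the NTF_OFFLOADED update when the device rejected the entry. Reduced to its essence (a sketch only; the helper name is hypothetical, the flag handling mirrors the patch):

	#include <net/neighbour.h>	/* struct neighbour, NTF_OFFLOADED */

	/* Only reflect offload state once the hardware write succeeded. */
	static void example_neigh_mark(struct neighbour *n, bool adding,
				       int hw_err)
	{
		if (hw_err)
			return;		/* leave the flag untouched on failure */
		if (adding)
			n->flags |= NTF_OFFLOADED;
		else
			n->flags &= ~NTF_OFFLOADED;
	}
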
index 536c23c578c348d103858c8fe48fec53d22d49a2..560a60e522f9e856682363bfd87ab93e55832199 100644 (file)
@@ -316,7 +316,11 @@ mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
 
        dev = rt->dst.dev;
        *saddrp = fl4.saddr;
-       *daddrp = rt->rt_gateway;
+       if (rt->rt_gw_family == AF_INET)
+               *daddrp = rt->rt_gw4;
+       /* can not offload if route has an IPv6 gateway */
+       else if (rt->rt_gw_family == AF_INET6)
+               dev = NULL;
 
 out:
        ip_rt_put(rt);
index f6ce386c30367f08a86153b8ac691a1480a1588a..50111f228d77228758d5e0ad634b1848712e11d4 100644 (file)
@@ -1630,7 +1630,7 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
        u16 fid_index;
        int err = 0;
 
-       if (switchdev_trans_ph_prepare(trans))
+       if (switchdev_trans_ph_commit(trans))
                return 0;
 
        bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
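
The one-line flip above moves MDB programming from the commit to the prepare phase: switchdev object adds run twice, and work done in prepare may still fail the transaction cleanly, while the commit phase must not fail. The canonical pattern, sketched with a hypothetical hardware helper:

	#include <net/switchdev.h>

	static int example_program_mdb_hw(void)
	{
		return 0;	/* stand-in for the real SMID/MDB register writes */
	}

	static int example_port_obj_add(struct switchdev_trans *trans)
	{
		if (switchdev_trans_ph_commit(trans))
			return 0;	/* all work already done in prepare */

		/* Failing here aborts the whole transaction safely. */
		return example_program_mdb_hw();
	}
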
index e1e7e0dd808d5094bcf246c06b55f38a7570316d..0d9356b3f65d8b89f3f2eb15f82cbacf3faea14c 100644 (file)
@@ -30,6 +30,7 @@ struct mlxsw_sib {
        struct mlxsw_sib_port **ports;
        struct mlxsw_core *core;
        const struct mlxsw_bus_info *bus_info;
+       u8 hw_id[ETH_ALEN];
 };
 
 struct mlxsw_sib_port {
@@ -102,6 +103,18 @@ mlxsw_sib_tx_v1_hdr_construct(struct sk_buff *skb,
        mlxsw_tx_v1_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
 }
 
+static int mlxsw_sib_hw_id_get(struct mlxsw_sib *mlxsw_sib)
+{
+       char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
+       int err;
+
+       err = mlxsw_reg_query(mlxsw_sib->core, MLXSW_REG(spad), spad_pl);
+       if (err)
+               return err;
+       mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sib->hw_id);
+       return 0;
+}
+
 static int
 mlxsw_sib_port_admin_status_set(struct mlxsw_sib_port *mlxsw_sib_port,
                                bool is_up)
@@ -268,7 +281,8 @@ static int mlxsw_sib_port_create(struct mlxsw_sib *mlxsw_sib, u8 local_port,
        int err;
 
        err = mlxsw_core_port_init(mlxsw_sib->core, local_port,
-                                  module + 1, false, 0);
+                                  module + 1, false, 0,
+                                  mlxsw_sib->hw_id, sizeof(mlxsw_sib->hw_id));
        if (err) {
                dev_err(mlxsw_sib->bus_info->dev, "Port %d: Failed to init core port\n",
                        local_port);
@@ -440,6 +454,12 @@ static int mlxsw_sib_init(struct mlxsw_core *mlxsw_core,
        mlxsw_sib->core = mlxsw_core;
        mlxsw_sib->bus_info = mlxsw_bus_info;
 
+       err = mlxsw_sib_hw_id_get(mlxsw_sib);
+       if (err) {
+               dev_err(mlxsw_sib->bus_info->dev, "Failed to get switch HW ID\n");
+               return err;
+       }
+
        err = mlxsw_sib_ports_create(mlxsw_sib);
        if (err) {
                dev_err(mlxsw_sib->bus_info->dev, "Failed to create ports\n");
index 5312dc1f339b1e499ca99694df3c3079d0dd6a6a..fc4f19167262fe5ecce840bc6cb1a97c36da4295 100644 (file)
@@ -379,18 +379,6 @@ mlxsw_sx_port_get_stats64(struct net_device *dev,
        stats->tx_dropped       = tx_dropped;
 }
 
-static int mlxsw_sx_port_get_port_parent_id(struct net_device *dev,
-                                           struct netdev_phys_item_id *ppid)
-{
-       struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
-       struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
-
-       ppid->id_len = sizeof(mlxsw_sx->hw_id);
-       memcpy(&ppid->id, &mlxsw_sx->hw_id, ppid->id_len);
-
-       return 0;
-}
-
 static struct devlink_port *
 mlxsw_sx_port_get_devlink_port(struct net_device *dev)
 {
@@ -407,7 +395,6 @@ static const struct net_device_ops mlxsw_sx_port_netdev_ops = {
        .ndo_start_xmit         = mlxsw_sx_port_xmit,
        .ndo_change_mtu         = mlxsw_sx_port_change_mtu,
        .ndo_get_stats64        = mlxsw_sx_port_get_stats64,
-       .ndo_get_port_parent_id = mlxsw_sx_port_get_port_parent_id,
        .ndo_get_devlink_port   = mlxsw_sx_port_get_devlink_port,
 };
 
@@ -1128,7 +1115,8 @@ static int mlxsw_sx_port_eth_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
        int err;
 
        err = mlxsw_core_port_init(mlxsw_sx->core, local_port,
-                                  module + 1, false, 0);
+                                  module + 1, false, 0,
+                                  mlxsw_sx->hw_id, sizeof(mlxsw_sx->hw_id));
        if (err) {
                dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to init core port\n",
                        local_port);
index a1d0d6e4253324f702f6eecae804fdd574b5f32e..d715ef4fc92fdb61a89122793133db147c1e4f59 100644 (file)
@@ -613,7 +613,7 @@ static int ocelot_mact_mc_add(struct ocelot_port *port,
                              struct netdev_hw_addr *hw_addr)
 {
        struct ocelot *ocelot = port->ocelot;
-       struct netdev_hw_addr *ha = kzalloc(sizeof(*ha), GFP_KERNEL);
+       struct netdev_hw_addr *ha = kzalloc(sizeof(*ha), GFP_ATOMIC);
 
        if (!ha)
                return -ENOMEM;
@@ -959,10 +959,8 @@ static void ocelot_get_strings(struct net_device *netdev, u32 sset, u8 *data)
                       ETH_GSTRING_LEN);
 }
 
-static void ocelot_check_stats(struct work_struct *work)
+static void ocelot_update_stats(struct ocelot *ocelot)
 {
-       struct delayed_work *del_work = to_delayed_work(work);
-       struct ocelot *ocelot = container_of(del_work, struct ocelot, stats_work);
        int i, j;
 
        mutex_lock(&ocelot->stats_lock);
@@ -986,11 +984,19 @@ static void ocelot_check_stats(struct work_struct *work)
                }
        }
 
-       cancel_delayed_work(&ocelot->stats_work);
+       mutex_unlock(&ocelot->stats_lock);
+}
+
+static void ocelot_check_stats_work(struct work_struct *work)
+{
+       struct delayed_work *del_work = to_delayed_work(work);
+       struct ocelot *ocelot = container_of(del_work, struct ocelot,
+                                            stats_work);
+
+       ocelot_update_stats(ocelot);
+
        queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
                           OCELOT_STATS_CHECK_DELAY);
-
-       mutex_unlock(&ocelot->stats_lock);
 }
 
 static void ocelot_get_ethtool_stats(struct net_device *dev,
@@ -1001,7 +1007,7 @@ static void ocelot_get_ethtool_stats(struct net_device *dev,
        int i;
 
        /* check and update now */
-       ocelot_check_stats(&ocelot->stats_work.work);
+       ocelot_update_stats(ocelot);
 
        /* Copy all counters */
        for (i = 0; i < ocelot->num_stats; i++)
@@ -1809,7 +1815,7 @@ int ocelot_init(struct ocelot *ocelot)
                                 ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL(6),
                                 ANA_CPUQ_8021_CFG, i);
 
-       INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats);
+       INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats_work);
        queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
                           OCELOT_STATS_CHECK_DELAY);
        return 0;
index 7cde387e5ec62a0c36f070a163a6e5b9c38a6a4b..51cd57ab3d9584d3d67508cc94aa6c9590aa11d1 100644 (file)
@@ -2366,6 +2366,7 @@ static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
                                dma_object->addr))) {
                        vxge_os_dma_free(devh->pdev, memblock,
                                &dma_object->acc_handle);
+                       memblock = NULL;
                        goto exit;
                }
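
The single added line above closes a classic error-path hole: the block was freed, but the stale pointer was still handed back through the exit label. The same bug in miniature, as plain userspace C for illustration only:

	#include <stdlib.h>

	static int example_setup(void *block)
	{
		return block != NULL;	/* stand-in for the DMA mapping step */
	}

	static void *example_alloc(size_t size)
	{
		void *block = malloc(size);

		if (block && !example_setup(block)) {
			free(block);
			block = NULL;	/* else the caller gets a dangling pointer */
		}
		return block;
	}
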
 
index 47c708f08ade56676b60190fd9e8e1b20ed62408..0673f3aa2c8df9bae63282f8dc4fadf9e8b160f8 100644 (file)
@@ -15,6 +15,7 @@ nfp-objs := \
            nfpcore/nfp_resource.o \
            nfpcore/nfp_rtsym.o \
            nfpcore/nfp_target.o \
+           ccm.o \
            nfp_asm.o \
            nfp_app.o \
            nfp_app_nic.o \
index 9584f03f3efaf73fbd6894334ca47c98b2e18744..69e84ff7f2e5214e584c8b09722a069264acc0b0 100644 (file)
@@ -261,10 +261,15 @@ int nfp_abm_ctrl_qm_disable(struct nfp_abm *abm)
 
 int nfp_abm_ctrl_prio_map_update(struct nfp_abm_link *alink, u32 *packed)
 {
+       const u32 cmd = NFP_NET_CFG_MBOX_CMD_PCI_DSCP_PRIOMAP_SET;
        struct nfp_net *nn = alink->vnic;
        unsigned int i;
        int err;
 
+       err = nfp_net_mbox_lock(nn, alink->abm->prio_map_len);
+       if (err)
+               return err;
+
        /* Write data_len and wipe reserved */
        nn_writeq(nn, nn->tlv_caps.mbox_off + NFP_NET_ABM_MBOX_DATALEN,
                  alink->abm->prio_map_len);
@@ -273,8 +278,7 @@ int nfp_abm_ctrl_prio_map_update(struct nfp_abm_link *alink, u32 *packed)
                nn_writel(nn, nn->tlv_caps.mbox_off + NFP_NET_ABM_MBOX_DATA + i,
                          packed[i / sizeof(u32)]);
 
-       err = nfp_net_reconfig_mbox(nn,
-                                   NFP_NET_CFG_MBOX_CMD_PCI_DSCP_PRIOMAP_SET);
+       err = nfp_net_mbox_reconfig_and_unlock(nn, cmd);
        if (err)
                nfp_err(alink->abm->app->cpp,
                        "setting DSCP -> VQ map failed with error %d\n", err);
index 4d4ff5844c4735ef36f5a135e1df190e0089ee24..9183b3e85d217dec1328065f4594ce5e5895491d 100644 (file)
@@ -53,7 +53,8 @@ nfp_abm_setup_tc(struct nfp_app *app, struct net_device *netdev,
        }
 }
 
-static struct net_device *nfp_abm_repr_get(struct nfp_app *app, u32 port_id)
+static struct net_device *
+nfp_abm_repr_get(struct nfp_app *app, u32 port_id, bool *redir_egress)
 {
        enum nfp_repr_type rtype;
        struct nfp_reprs *reprs;
@@ -549,5 +550,5 @@ const struct nfp_app_type app_abm = {
        .eswitch_mode_get       = nfp_abm_eswitch_mode_get,
        .eswitch_mode_set       = nfp_abm_eswitch_mode_set,
 
-       .repr_get       = nfp_abm_repr_get,
+       .dev_get        = nfp_abm_repr_get,
 };
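
Returning to the abm/ctrl.c hunk above: the mailbox critical section is now explicit, with the lock taken before the payload is written and released together with the reconfig. Its skeleton, sketched (nfp_net_mbox_lock() and nfp_net_mbox_reconfig_and_unlock() are the helpers this patch switches to; the wrapper is illustrative):

	static int example_mbox_cmd(struct nfp_net *nn, u32 cmd, u32 data_len)
	{
		int err;

		err = nfp_net_mbox_lock(nn, data_len);
		if (err)
			return err;

		/* ... nn_writeq()/nn_writel() the command payload here ... */

		return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
	}
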
index 9b6cfa697879391a611370e20daeb517d115aea7..bc9850e4ec5e646e7e21511a995c5315b1b26062 100644 (file)
@@ -6,48 +6,13 @@
 #include <linux/bug.h>
 #include <linux/jiffies.h>
 #include <linux/skbuff.h>
-#include <linux/wait.h>
 
+#include "../ccm.h"
 #include "../nfp_app.h"
 #include "../nfp_net.h"
 #include "fw.h"
 #include "main.h"
 
-#define NFP_BPF_TAG_ALLOC_SPAN (U16_MAX / 4)
-
-static bool nfp_bpf_all_tags_busy(struct nfp_app_bpf *bpf)
-{
-       u16 used_tags;
-
-       used_tags = bpf->tag_alloc_next - bpf->tag_alloc_last;
-
-       return used_tags > NFP_BPF_TAG_ALLOC_SPAN;
-}
-
-static int nfp_bpf_alloc_tag(struct nfp_app_bpf *bpf)
-{
-       /* All FW communication for BPF is request-reply.  To make sure we
-        * don't reuse the message ID too early after timeout - limit the
-        * number of requests in flight.
-        */
-       if (nfp_bpf_all_tags_busy(bpf)) {
-               cmsg_warn(bpf, "all FW request contexts busy!\n");
-               return -EAGAIN;
-       }
-
-       WARN_ON(__test_and_set_bit(bpf->tag_alloc_next, bpf->tag_allocator));
-       return bpf->tag_alloc_next++;
-}
-
-static void nfp_bpf_free_tag(struct nfp_app_bpf *bpf, u16 tag)
-{
-       WARN_ON(!__test_and_clear_bit(tag, bpf->tag_allocator));
-
-       while (!test_bit(bpf->tag_alloc_last, bpf->tag_allocator) &&
-              bpf->tag_alloc_last != bpf->tag_alloc_next)
-               bpf->tag_alloc_last++;
-}
-
 static struct sk_buff *
 nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size)
 {
@@ -87,149 +52,6 @@ nfp_bpf_cmsg_map_reply_size(struct nfp_app_bpf *bpf, unsigned int n)
        return size;
 }
 
-static u8 nfp_bpf_cmsg_get_type(struct sk_buff *skb)
-{
-       struct cmsg_hdr *hdr;
-
-       hdr = (struct cmsg_hdr *)skb->data;
-
-       return hdr->type;
-}
-
-static unsigned int nfp_bpf_cmsg_get_tag(struct sk_buff *skb)
-{
-       struct cmsg_hdr *hdr;
-
-       hdr = (struct cmsg_hdr *)skb->data;
-
-       return be16_to_cpu(hdr->tag);
-}
-
-static struct sk_buff *__nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag)
-{
-       unsigned int msg_tag;
-       struct sk_buff *skb;
-
-       skb_queue_walk(&bpf->cmsg_replies, skb) {
-               msg_tag = nfp_bpf_cmsg_get_tag(skb);
-               if (msg_tag == tag) {
-                       nfp_bpf_free_tag(bpf, tag);
-                       __skb_unlink(skb, &bpf->cmsg_replies);
-                       return skb;
-               }
-       }
-
-       return NULL;
-}
-
-static struct sk_buff *nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag)
-{
-       struct sk_buff *skb;
-
-       nfp_ctrl_lock(bpf->app->ctrl);
-       skb = __nfp_bpf_reply(bpf, tag);
-       nfp_ctrl_unlock(bpf->app->ctrl);
-
-       return skb;
-}
-
-static struct sk_buff *nfp_bpf_reply_drop_tag(struct nfp_app_bpf *bpf, u16 tag)
-{
-       struct sk_buff *skb;
-
-       nfp_ctrl_lock(bpf->app->ctrl);
-       skb = __nfp_bpf_reply(bpf, tag);
-       if (!skb)
-               nfp_bpf_free_tag(bpf, tag);
-       nfp_ctrl_unlock(bpf->app->ctrl);
-
-       return skb;
-}
-
-static struct sk_buff *
-nfp_bpf_cmsg_wait_reply(struct nfp_app_bpf *bpf, enum nfp_bpf_cmsg_type type,
-                       int tag)
-{
-       struct sk_buff *skb;
-       int i, err;
-
-       for (i = 0; i < 50; i++) {
-               udelay(4);
-               skb = nfp_bpf_reply(bpf, tag);
-               if (skb)
-                       return skb;
-       }
-
-       err = wait_event_interruptible_timeout(bpf->cmsg_wq,
-                                              skb = nfp_bpf_reply(bpf, tag),
-                                              msecs_to_jiffies(5000));
-       /* We didn't get a response - try last time and atomically drop
-        * the tag even if no response is matched.
-        */
-       if (!skb)
-               skb = nfp_bpf_reply_drop_tag(bpf, tag);
-       if (err < 0) {
-               cmsg_warn(bpf, "%s waiting for response to 0x%02x: %d\n",
-                         err == ERESTARTSYS ? "interrupted" : "error",
-                         type, err);
-               return ERR_PTR(err);
-       }
-       if (!skb) {
-               cmsg_warn(bpf, "timeout waiting for response to 0x%02x\n",
-                         type);
-               return ERR_PTR(-ETIMEDOUT);
-       }
-
-       return skb;
-}
-
-static struct sk_buff *
-nfp_bpf_cmsg_communicate(struct nfp_app_bpf *bpf, struct sk_buff *skb,
-                        enum nfp_bpf_cmsg_type type, unsigned int reply_size)
-{
-       struct cmsg_hdr *hdr;
-       int tag;
-
-       nfp_ctrl_lock(bpf->app->ctrl);
-       tag = nfp_bpf_alloc_tag(bpf);
-       if (tag < 0) {
-               nfp_ctrl_unlock(bpf->app->ctrl);
-               dev_kfree_skb_any(skb);
-               return ERR_PTR(tag);
-       }
-
-       hdr = (void *)skb->data;
-       hdr->ver = CMSG_MAP_ABI_VERSION;
-       hdr->type = type;
-       hdr->tag = cpu_to_be16(tag);
-
-       __nfp_app_ctrl_tx(bpf->app, skb);
-
-       nfp_ctrl_unlock(bpf->app->ctrl);
-
-       skb = nfp_bpf_cmsg_wait_reply(bpf, type, tag);
-       if (IS_ERR(skb))
-               return skb;
-
-       hdr = (struct cmsg_hdr *)skb->data;
-       if (hdr->type != __CMSG_REPLY(type)) {
-               cmsg_warn(bpf, "cmsg drop - wrong type 0x%02x != 0x%02lx!\n",
-                         hdr->type, __CMSG_REPLY(type));
-               goto err_free;
-       }
-       /* 0 reply_size means caller will do the validation */
-       if (reply_size && skb->len != reply_size) {
-               cmsg_warn(bpf, "cmsg drop - type 0x%02x wrong size %d != %d!\n",
-                         type, skb->len, reply_size);
-               goto err_free;
-       }
-
-       return skb;
-err_free:
-       dev_kfree_skb_any(skb);
-       return ERR_PTR(-EIO);
-}
-
 static int
 nfp_bpf_ctrl_rc_to_errno(struct nfp_app_bpf *bpf,
                         struct cmsg_reply_map_simple *reply)
@@ -275,8 +97,8 @@ nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map)
        req->map_type = cpu_to_be32(map->map_type);
        req->map_flags = 0;
 
-       skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_ALLOC,
-                                      sizeof(*reply));
+       skb = nfp_ccm_communicate(&bpf->ccm, skb, NFP_CCM_TYPE_BPF_MAP_ALLOC,
+                                 sizeof(*reply));
        if (IS_ERR(skb))
                return PTR_ERR(skb);
 
@@ -310,8 +132,8 @@ void nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map)
        req = (void *)skb->data;
        req->tid = cpu_to_be32(nfp_map->tid);
 
-       skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_FREE,
-                                      sizeof(*reply));
+       skb = nfp_ccm_communicate(&bpf->ccm, skb, NFP_CCM_TYPE_BPF_MAP_FREE,
+                                 sizeof(*reply));
        if (IS_ERR(skb)) {
                cmsg_warn(bpf, "leaking map - I/O error\n");
                return;
@@ -354,8 +176,7 @@ nfp_bpf_ctrl_reply_val(struct nfp_app_bpf *bpf, struct cmsg_reply_map_op *reply,
 }
 
 static int
-nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap,
-                     enum nfp_bpf_cmsg_type op,
+nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap, enum nfp_ccm_type op,
                      u8 *key, u8 *value, u64 flags, u8 *out_key, u8 *out_value)
 {
        struct nfp_bpf_map *nfp_map = offmap->dev_priv;
@@ -386,8 +207,8 @@ nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap,
                memcpy(nfp_bpf_ctrl_req_val(bpf, req, 0), value,
                       map->value_size);
 
-       skb = nfp_bpf_cmsg_communicate(bpf, skb, op,
-                                      nfp_bpf_cmsg_map_reply_size(bpf, 1));
+       skb = nfp_ccm_communicate(&bpf->ccm, skb, op,
+                                 nfp_bpf_cmsg_map_reply_size(bpf, 1));
        if (IS_ERR(skb))
                return PTR_ERR(skb);
 
@@ -415,34 +236,34 @@ nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap,
 int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
                              void *key, void *value, u64 flags)
 {
-       return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_UPDATE,
+       return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_UPDATE,
                                     key, value, flags, NULL, NULL);
 }
 
 int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key)
 {
-       return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_DELETE,
+       return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_DELETE,
                                     key, NULL, 0, NULL, NULL);
 }
 
 int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
                              void *key, void *value)
 {
-       return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_LOOKUP,
+       return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_LOOKUP,
                                     key, NULL, 0, NULL, value);
 }
 
 int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
                                void *next_key)
 {
-       return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETFIRST,
+       return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_GETFIRST,
                                     NULL, NULL, 0, next_key, NULL);
 }
 
 int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
                               void *key, void *next_key)
 {
-       return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETNEXT,
+       return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_GETNEXT,
                                     key, NULL, 0, next_key, NULL);
 }
 
@@ -456,54 +277,36 @@ unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf *bpf)
 void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
 {
        struct nfp_app_bpf *bpf = app->priv;
-       unsigned int tag;
 
        if (unlikely(skb->len < sizeof(struct cmsg_reply_map_simple))) {
                cmsg_warn(bpf, "cmsg drop - too short %d!\n", skb->len);
-               goto err_free;
+               dev_kfree_skb_any(skb);
+               return;
        }
 
-       if (nfp_bpf_cmsg_get_type(skb) == CMSG_TYPE_BPF_EVENT) {
+       if (nfp_ccm_get_type(skb) == NFP_CCM_TYPE_BPF_BPF_EVENT) {
                if (!nfp_bpf_event_output(bpf, skb->data, skb->len))
                        dev_consume_skb_any(skb);
                else
                        dev_kfree_skb_any(skb);
                return;
        }
 
-       nfp_ctrl_lock(bpf->app->ctrl);
-
-       tag = nfp_bpf_cmsg_get_tag(skb);
-       if (unlikely(!test_bit(tag, bpf->tag_allocator))) {
-               cmsg_warn(bpf, "cmsg drop - no one is waiting for tag %u!\n",
-                         tag);
-               goto err_unlock;
-       }
-
-       __skb_queue_tail(&bpf->cmsg_replies, skb);
-       wake_up_interruptible_all(&bpf->cmsg_wq);
-
-       nfp_ctrl_unlock(bpf->app->ctrl);
-
-       return;
-err_unlock:
-       nfp_ctrl_unlock(bpf->app->ctrl);
-err_free:
-       dev_kfree_skb_any(skb);
+       nfp_ccm_rx(&bpf->ccm, skb);
 }
 
 void
 nfp_bpf_ctrl_msg_rx_raw(struct nfp_app *app, const void *data, unsigned int len)
 {
+       const struct nfp_ccm_hdr *hdr = data;
        struct nfp_app_bpf *bpf = app->priv;
-       const struct cmsg_hdr *hdr = data;
 
        if (unlikely(len < sizeof(struct cmsg_reply_map_simple))) {
                cmsg_warn(bpf, "cmsg drop - too short %d!\n", len);
                return;
        }
 
-       if (hdr->type == CMSG_TYPE_BPF_EVENT)
+       if (hdr->type == NFP_CCM_TYPE_BPF_BPF_EVENT)
                nfp_bpf_event_output(bpf, data, len);
        else
                cmsg_warn(bpf, "cmsg drop - msg type %d with raw buffer!\n",
index 721921bcf120b8f126de424e4d5fc40e55487aa1..06c4286bd79e0a0b3578da221783a20d54d8a7f9 100644 (file)
@@ -6,6 +6,7 @@
 
 #include <linux/bitops.h>
 #include <linux/types.h>
+#include "../ccm.h"
 
 /* Kernel's enum bpf_reg_type is not uABI so people may change it breaking
  * our FW ABI.  In that case we will do translation in the driver.
@@ -52,22 +53,6 @@ struct nfp_bpf_cap_tlv_maps {
 /*
  * Types defined for map related control messages
  */
-#define CMSG_MAP_ABI_VERSION           1
-
-enum nfp_bpf_cmsg_type {
-       CMSG_TYPE_MAP_ALLOC     = 1,
-       CMSG_TYPE_MAP_FREE      = 2,
-       CMSG_TYPE_MAP_LOOKUP    = 3,
-       CMSG_TYPE_MAP_UPDATE    = 4,
-       CMSG_TYPE_MAP_DELETE    = 5,
-       CMSG_TYPE_MAP_GETNEXT   = 6,
-       CMSG_TYPE_MAP_GETFIRST  = 7,
-       CMSG_TYPE_BPF_EVENT     = 8,
-       __CMSG_TYPE_MAP_MAX,
-};
-
-#define CMSG_TYPE_MAP_REPLY_BIT                7
-#define __CMSG_REPLY(req)              (BIT(CMSG_TYPE_MAP_REPLY_BIT) | (req))
 
 /* BPF ABIv2 fixed-length control message fields */
 #define CMSG_MAP_KEY_LW                        16
@@ -84,19 +69,13 @@ enum nfp_bpf_cmsg_status {
        CMSG_RC_ERR_MAP_E2BIG           = 7,
 };
 
-struct cmsg_hdr {
-       u8 type;
-       u8 ver;
-       __be16 tag;
-};
-
 struct cmsg_reply_map_simple {
-       struct cmsg_hdr hdr;
+       struct nfp_ccm_hdr hdr;
        __be32 rc;
 };
 
 struct cmsg_req_map_alloc_tbl {
-       struct cmsg_hdr hdr;
+       struct nfp_ccm_hdr hdr;
        __be32 key_size;                /* in bytes */
        __be32 value_size;              /* in bytes */
        __be32 max_entries;
@@ -110,7 +89,7 @@ struct cmsg_reply_map_alloc_tbl {
 };
 
 struct cmsg_req_map_free_tbl {
-       struct cmsg_hdr hdr;
+       struct nfp_ccm_hdr hdr;
        __be32 tid;
 };
 
@@ -120,7 +99,7 @@ struct cmsg_reply_map_free_tbl {
 };
 
 struct cmsg_req_map_op {
-       struct cmsg_hdr hdr;
+       struct nfp_ccm_hdr hdr;
        __be32 tid;
        __be32 count;
        __be32 flags;
@@ -135,7 +114,7 @@ struct cmsg_reply_map_op {
 };
 
 struct cmsg_bpf_event {
-       struct cmsg_hdr hdr;
+       struct nfp_ccm_hdr hdr;
        __be32 cpu_id;
        __be64 map_ptr;
        __be32 data_size;
index 275de9f4c61c635c69c5a6ac657e22511762998a..9c136da252214cd8c4fabd90e90e0b5e4e3c678e 100644 (file)
@@ -442,14 +442,16 @@ static int nfp_bpf_init(struct nfp_app *app)
        bpf->app = app;
        app->priv = bpf;
 
-       skb_queue_head_init(&bpf->cmsg_replies);
-       init_waitqueue_head(&bpf->cmsg_wq);
        INIT_LIST_HEAD(&bpf->map_list);
 
-       err = rhashtable_init(&bpf->maps_neutral, &nfp_bpf_maps_neutral_params);
+       err = nfp_ccm_init(&bpf->ccm, app);
        if (err)
                goto err_free_bpf;
 
+       err = rhashtable_init(&bpf->maps_neutral, &nfp_bpf_maps_neutral_params);
+       if (err)
+               goto err_clean_ccm;
+
        nfp_bpf_init_capabilities(bpf);
 
        err = nfp_bpf_parse_capabilities(app);
@@ -474,6 +476,8 @@ static int nfp_bpf_init(struct nfp_app *app)
 
 err_free_neutral_maps:
        rhashtable_destroy(&bpf->maps_neutral);
+err_clean_ccm:
+       nfp_ccm_clean(&bpf->ccm);
 err_free_bpf:
        kfree(bpf);
        return err;
@@ -484,7 +488,7 @@ static void nfp_bpf_clean(struct nfp_app *app)
        struct nfp_app_bpf *bpf = app->priv;
 
        bpf_offload_dev_destroy(bpf->bpf_dev);
-       WARN_ON(!skb_queue_empty(&bpf->cmsg_replies));
+       nfp_ccm_clean(&bpf->ccm);
        WARN_ON(!list_empty(&bpf->map_list));
        WARN_ON(bpf->maps_in_use || bpf->map_elems_in_use);
        rhashtable_free_and_destroy(&bpf->maps_neutral,
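
With the tag allocator and reply queue gone from struct nfp_app_bpf, every request/reply exchange funnels through the shared CCM helper. A minimal sketch of a caller, with request construction trimmed and the wrapper name purely illustrative:

	static int example_ccm_roundtrip(struct nfp_app_bpf *bpf,
					 struct sk_buff *skb)
	{
		/* Sends the request, blocks for the matching tagged reply,
		 * and frees the skb internally on failure.
		 */
		skb = nfp_ccm_communicate(&bpf->ccm, skb,
					  NFP_CCM_TYPE_BPF_MAP_FREE,
					  sizeof(struct cmsg_reply_map_simple));
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* ... parse the reply here ... */
		dev_consume_skb_any(skb);
		return 0;
	}
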
index b25a48218bcf7937320ee8e9fa174aaedbea744d..e54d1ac84df278737b39a5ee40bded7e1b43111e 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/types.h>
 #include <linux/wait.h>
 
+#include "../ccm.h"
 #include "../nfp_asm.h"
 #include "fw.h"
 
@@ -84,16 +85,10 @@ enum pkt_vec {
 /**
  * struct nfp_app_bpf - bpf app priv structure
  * @app:               backpointer to the app
+ * @ccm:               common control message handler data
  *
  * @bpf_dev:           BPF offload device handle
  *
- * @tag_allocator:     bitmap of control message tags in use
- * @tag_alloc_next:    next tag bit to allocate
- * @tag_alloc_last:    next tag bit to be freed
- *
- * @cmsg_replies:      received cmsg replies waiting to be consumed
- * @cmsg_wq:           work queue for waiting for cmsg replies
- *
  * @cmsg_key_sz:       size of key in cmsg element array
  * @cmsg_val_sz:       size of value in cmsg element array
  *
@@ -132,16 +127,10 @@ enum pkt_vec {
  */
 struct nfp_app_bpf {
        struct nfp_app *app;
+       struct nfp_ccm ccm;
 
        struct bpf_offload_dev *bpf_dev;
 
-       DECLARE_BITMAP(tag_allocator, U16_MAX + 1);
-       u16 tag_alloc_next;
-       u16 tag_alloc_last;
-
-       struct sk_buff_head cmsg_replies;
-       struct wait_queue_head cmsg_wq;
-
        unsigned int cmsg_key_sz;
        unsigned int cmsg_val_sz;
 
index 15dce97650a5af251d0be613e66735221270a195..39c9fec222b45823eca3772a77b239938188fcd2 100644 (file)
@@ -22,6 +22,7 @@
 #include <net/tc_act/tc_mirred.h>
 
 #include "main.h"
+#include "../ccm.h"
 #include "../nfp_app.h"
 #include "../nfp_net_ctrl.h"
 #include "../nfp_net.h"
@@ -452,7 +453,7 @@ int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
 
        if (len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
                return -EINVAL;
-       if (cbe->hdr.ver != CMSG_MAP_ABI_VERSION)
+       if (cbe->hdr.ver != NFP_CCM_ABI_VERSION)
                return -EINVAL;
 
        rcu_read_lock();
diff --git a/drivers/net/ethernet/netronome/nfp/ccm.c b/drivers/net/ethernet/netronome/nfp/ccm.c
new file mode 100644 (file)
index 0000000..94476e4
--- /dev/null
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2016-2019 Netronome Systems, Inc. */
+
+#include <linux/bitops.h>
+
+#include "ccm.h"
+#include "nfp_app.h"
+#include "nfp_net.h"
+
+#define NFP_CCM_TYPE_REPLY_BIT         7
+#define __NFP_CCM_REPLY(req)           (BIT(NFP_CCM_TYPE_REPLY_BIT) | (req))
+
+#define ccm_warn(app, msg...)  nn_dp_warn(&(app)->ctrl->dp, msg)
+
+#define NFP_CCM_TAG_ALLOC_SPAN (U16_MAX / 4)
+
+static bool nfp_ccm_all_tags_busy(struct nfp_ccm *ccm)
+{
+       u16 used_tags;
+
+       used_tags = ccm->tag_alloc_next - ccm->tag_alloc_last;
+
+       return used_tags > NFP_CCM_TAG_ALLOC_SPAN;
+}
+
+static int nfp_ccm_alloc_tag(struct nfp_ccm *ccm)
+{
+       /* CCM is for FW communication which is request-reply.  To make sure
+        * we don't reuse the message ID too early after timeout - limit the
+        * number of requests in flight.
+        */
+       if (unlikely(nfp_ccm_all_tags_busy(ccm))) {
+               ccm_warn(ccm->app, "all FW request contexts busy!\n");
+               return -EAGAIN;
+       }
+
+       WARN_ON(__test_and_set_bit(ccm->tag_alloc_next, ccm->tag_allocator));
+       return ccm->tag_alloc_next++;
+}
+
+static void nfp_ccm_free_tag(struct nfp_ccm *ccm, u16 tag)
+{
+       WARN_ON(!__test_and_clear_bit(tag, ccm->tag_allocator));
+
+       while (!test_bit(ccm->tag_alloc_last, ccm->tag_allocator) &&
+              ccm->tag_alloc_last != ccm->tag_alloc_next)
+               ccm->tag_alloc_last++;
+}
+
+static struct sk_buff *__nfp_ccm_reply(struct nfp_ccm *ccm, u16 tag)
+{
+       unsigned int msg_tag;
+       struct sk_buff *skb;
+
+       skb_queue_walk(&ccm->replies, skb) {
+               msg_tag = nfp_ccm_get_tag(skb);
+               if (msg_tag == tag) {
+                       nfp_ccm_free_tag(ccm, tag);
+                       __skb_unlink(skb, &ccm->replies);
+                       return skb;
+               }
+       }
+
+       return NULL;
+}
+
+static struct sk_buff *
+nfp_ccm_reply(struct nfp_ccm *ccm, struct nfp_app *app, u16 tag)
+{
+       struct sk_buff *skb;
+
+       nfp_ctrl_lock(app->ctrl);
+       skb = __nfp_ccm_reply(ccm, tag);
+       nfp_ctrl_unlock(app->ctrl);
+
+       return skb;
+}
+
+static struct sk_buff *
+nfp_ccm_reply_drop_tag(struct nfp_ccm *ccm, struct nfp_app *app, u16 tag)
+{
+       struct sk_buff *skb;
+
+       nfp_ctrl_lock(app->ctrl);
+       skb = __nfp_ccm_reply(ccm, tag);
+       if (!skb)
+               nfp_ccm_free_tag(ccm, tag);
+       nfp_ctrl_unlock(app->ctrl);
+
+       return skb;
+}
+
+static struct sk_buff *
+nfp_ccm_wait_reply(struct nfp_ccm *ccm, struct nfp_app *app,
+                  enum nfp_ccm_type type, int tag)
+{
+       struct sk_buff *skb;
+       int i, err;
+
+       for (i = 0; i < 50; i++) {
+               udelay(4);
+               skb = nfp_ccm_reply(ccm, app, tag);
+               if (skb)
+                       return skb;
+       }
+
+       err = wait_event_interruptible_timeout(ccm->wq,
+                                              skb = nfp_ccm_reply(ccm, app,
+                                                                  tag),
+                                              msecs_to_jiffies(5000));
+       /* We didn't get a response - try one last time and atomically drop
+        * the tag even if no response is matched.
+        */
+       if (!skb)
+               skb = nfp_ccm_reply_drop_tag(ccm, app, tag);
+       if (err < 0) {
+               ccm_warn(app, "%s waiting for response to 0x%02x: %d\n",
+                        err == ERESTARTSYS ? "interrupted" : "error",
+                        type, err);
+               return ERR_PTR(err);
+       }
+       if (!skb) {
+               ccm_warn(app, "timeout waiting for response to 0x%02x\n", type);
+               return ERR_PTR(-ETIMEDOUT);
+       }
+
+       return skb;
+}
+
+struct sk_buff *
+nfp_ccm_communicate(struct nfp_ccm *ccm, struct sk_buff *skb,
+                   enum nfp_ccm_type type, unsigned int reply_size)
+{
+       struct nfp_app *app = ccm->app;
+       struct nfp_ccm_hdr *hdr;
+       int reply_type, tag;
+
+       nfp_ctrl_lock(app->ctrl);
+       tag = nfp_ccm_alloc_tag(ccm);
+       if (tag < 0) {
+               nfp_ctrl_unlock(app->ctrl);
+               dev_kfree_skb_any(skb);
+               return ERR_PTR(tag);
+       }
+
+       hdr = (void *)skb->data;
+       hdr->ver = NFP_CCM_ABI_VERSION;
+       hdr->type = type;
+       hdr->tag = cpu_to_be16(tag);
+
+       __nfp_app_ctrl_tx(app, skb);
+
+       nfp_ctrl_unlock(app->ctrl);
+
+       skb = nfp_ccm_wait_reply(ccm, app, type, tag);
+       if (IS_ERR(skb))
+               return skb;
+
+       reply_type = nfp_ccm_get_type(skb);
+       if (reply_type != __NFP_CCM_REPLY(type)) {
+               ccm_warn(app, "cmsg drop - wrong type 0x%02x != 0x%02lx!\n",
+                        reply_type, __NFP_CCM_REPLY(type));
+               goto err_free;
+       }
+       /* 0 reply_size means caller will do the validation */
+       if (reply_size && skb->len != reply_size) {
+               ccm_warn(app, "cmsg drop - type 0x%02x wrong size %d != %d!\n",
+                        type, skb->len, reply_size);
+               goto err_free;
+       }
+
+       return skb;
+err_free:
+       dev_kfree_skb_any(skb);
+       return ERR_PTR(-EIO);
+}
+
+void nfp_ccm_rx(struct nfp_ccm *ccm, struct sk_buff *skb)
+{
+       struct nfp_app *app = ccm->app;
+       unsigned int tag;
+
+       if (unlikely(skb->len < sizeof(struct nfp_ccm_hdr))) {
+               ccm_warn(app, "cmsg drop - too short %d!\n", skb->len);
+               goto err_free;
+       }
+
+       nfp_ctrl_lock(app->ctrl);
+
+       tag = nfp_ccm_get_tag(skb);
+       if (unlikely(!test_bit(tag, ccm->tag_allocator))) {
+               ccm_warn(app, "cmsg drop - no one is waiting for tag %u!\n",
+                        tag);
+               goto err_unlock;
+       }
+
+       __skb_queue_tail(&ccm->replies, skb);
+       wake_up_interruptible_all(&ccm->wq);
+
+       nfp_ctrl_unlock(app->ctrl);
+       return;
+
+err_unlock:
+       nfp_ctrl_unlock(app->ctrl);
+err_free:
+       dev_kfree_skb_any(skb);
+}
+
+int nfp_ccm_init(struct nfp_ccm *ccm, struct nfp_app *app)
+{
+       ccm->app = app;
+       skb_queue_head_init(&ccm->replies);
+       init_waitqueue_head(&ccm->wq);
+       return 0;
+}
+
+void nfp_ccm_clean(struct nfp_ccm *ccm)
+{
+       WARN_ON(!skb_queue_empty(&ccm->replies));
+}
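
The allocator above hands out 16-bit tags in increasing order and only advances tag_alloc_last across contiguously freed bits, so next - last (with natural u16 wraparound) bounds the number of requests in flight; capping it at U16_MAX / 4 keeps a timed-out tag from being reissued while a late reply for it could still arrive. A standalone model of the window check, for illustration:

	#include <stdbool.h>
	#include <stdint.h>

	#define EXAMPLE_TAG_SPAN (UINT16_MAX / 4)

	static bool example_all_tags_busy(uint16_t tag_next, uint16_t tag_last)
	{
		/* u16 subtraction wraps exactly like the driver's counters */
		uint16_t in_flight = (uint16_t)(tag_next - tag_last);

		return in_flight > EXAMPLE_TAG_SPAN;
	}
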
diff --git a/drivers/net/ethernet/netronome/nfp/ccm.h b/drivers/net/ethernet/netronome/nfp/ccm.h
new file mode 100644 (file)
index 0000000..e2fe4b8
--- /dev/null
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2016-2019 Netronome Systems, Inc. */
+
+#ifndef NFP_CCM_H
+#define NFP_CCM_H 1
+
+#include <linux/bitmap.h>
+#include <linux/skbuff.h>
+#include <linux/wait.h>
+
+struct nfp_app;
+
+/* Firmware ABI */
+
+enum nfp_ccm_type {
+       NFP_CCM_TYPE_BPF_MAP_ALLOC      = 1,
+       NFP_CCM_TYPE_BPF_MAP_FREE       = 2,
+       NFP_CCM_TYPE_BPF_MAP_LOOKUP     = 3,
+       NFP_CCM_TYPE_BPF_MAP_UPDATE     = 4,
+       NFP_CCM_TYPE_BPF_MAP_DELETE     = 5,
+       NFP_CCM_TYPE_BPF_MAP_GETNEXT    = 6,
+       NFP_CCM_TYPE_BPF_MAP_GETFIRST   = 7,
+       NFP_CCM_TYPE_BPF_BPF_EVENT      = 8,
+       __NFP_CCM_TYPE_MAX,
+};
+
+#define NFP_CCM_ABI_VERSION            1
+
+struct nfp_ccm_hdr {
+       u8 type;
+       u8 ver;
+       __be16 tag;
+};
+
+static inline u8 nfp_ccm_get_type(struct sk_buff *skb)
+{
+       struct nfp_ccm_hdr *hdr;
+
+       hdr = (struct nfp_ccm_hdr *)skb->data;
+
+       return hdr->type;
+}
+
+static inline unsigned int nfp_ccm_get_tag(struct sk_buff *skb)
+{
+       struct nfp_ccm_hdr *hdr;
+
+       hdr = (struct nfp_ccm_hdr *)skb->data;
+
+       return be16_to_cpu(hdr->tag);
+}
+
+/* Implementation */
+
+/**
+ * struct nfp_ccm - common control message handling
+ * @tag_allocator:     bitmap of control message tags in use
+ * @tag_alloc_next:    next tag bit to allocate
+ * @tag_alloc_last:    next tag bit to be freed
+ *
+ * @replies:           received cmsg replies waiting to be consumed
+ * @wq:                        wait queue for threads waiting for cmsg replies
+ */
+struct nfp_ccm {
+       struct nfp_app *app;
+
+       DECLARE_BITMAP(tag_allocator, U16_MAX + 1);
+       u16 tag_alloc_next;
+       u16 tag_alloc_last;
+
+       struct sk_buff_head replies;
+       struct wait_queue_head wq;
+};
+
+int nfp_ccm_init(struct nfp_ccm *ccm, struct nfp_app *app);
+void nfp_ccm_clean(struct nfp_ccm *ccm);
+void nfp_ccm_rx(struct nfp_ccm *ccm, struct sk_buff *skb);
+struct sk_buff *
+nfp_ccm_communicate(struct nfp_ccm *ccm, struct sk_buff *skb,
+                   enum nfp_ccm_type type, unsigned int reply_size);
+#endif
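
Per the header above, every control message starts with the 4-byte nfp_ccm_hdr; nfp_ccm_communicate() fills in ver/type/tag on transmit, and the firmware echoes the tag back with bit 7 of the type set in its reply. A hypothetical request layout, for illustration only:

	#include <linux/types.h>
	#include "ccm.h"

	struct example_ccm_req {
		struct nfp_ccm_hdr hdr;	/* set by nfp_ccm_communicate() */
		__be32 arg;		/* hypothetical type-specific payload */
	};
	/* The matching reply arrives with hdr.type == BIT(7) | request type. */
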
index ce54b6c2a9ad69b3efb6ef44bfcbbe723b88d141..c56e31d9f8a4f9836972996685e071a5f24962d4 100644 (file)
@@ -48,8 +48,7 @@ nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
 
        tmp_push_vlan_tci =
                FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, act->vlan.prio) |
-               FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid) |
-               NFP_FL_PUSH_VLAN_CFI;
+               FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid);
        push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
 }
 
@@ -583,60 +582,23 @@ static u32 nfp_fl_csum_l4_to_flag(u8 ip_proto)
        }
 }
 
-static int
-nfp_fl_pedit(const struct flow_action_entry *act,
-            struct tc_cls_flower_offload *flow,
-            char *nfp_action, int *a_len, u32 *csum_updated)
-{
-       struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
+struct nfp_flower_pedit_acts {
        struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src;
        struct nfp_fl_set_ipv6_tc_hl_fl set_ip6_tc_hl_fl;
        struct nfp_fl_set_ip4_ttl_tos set_ip_ttl_tos;
        struct nfp_fl_set_ip4_addrs set_ip_addr;
-       enum flow_action_mangle_base htype;
        struct nfp_fl_set_tport set_tport;
        struct nfp_fl_set_eth set_eth;
+};
+
+static int
+nfp_fl_commit_mangle(struct tc_cls_flower_offload *flow, char *nfp_action,
+                    int *a_len, struct nfp_flower_pedit_acts *set_act,
+                    u32 *csum_updated)
+{
+       struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
        size_t act_size = 0;
        u8 ip_proto = 0;
-       u32 offset;
-       int err;
-
-       memset(&set_ip6_tc_hl_fl, 0, sizeof(set_ip6_tc_hl_fl));
-       memset(&set_ip_ttl_tos, 0, sizeof(set_ip_ttl_tos));
-       memset(&set_ip6_dst, 0, sizeof(set_ip6_dst));
-       memset(&set_ip6_src, 0, sizeof(set_ip6_src));
-       memset(&set_ip_addr, 0, sizeof(set_ip_addr));
-       memset(&set_tport, 0, sizeof(set_tport));
-       memset(&set_eth, 0, sizeof(set_eth));
-
-       htype = act->mangle.htype;
-       offset = act->mangle.offset;
-
-       switch (htype) {
-       case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
-               err = nfp_fl_set_eth(act, offset, &set_eth);
-               break;
-       case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
-               err = nfp_fl_set_ip4(act, offset, &set_ip_addr,
-                                    &set_ip_ttl_tos);
-               break;
-       case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
-               err = nfp_fl_set_ip6(act, offset, &set_ip6_dst,
-                                    &set_ip6_src, &set_ip6_tc_hl_fl);
-               break;
-       case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
-               err = nfp_fl_set_tport(act, offset, &set_tport,
-                                      NFP_FL_ACTION_OPCODE_SET_TCP);
-               break;
-       case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
-               err = nfp_fl_set_tport(act, offset, &set_tport,
-                                      NFP_FL_ACTION_OPCODE_SET_UDP);
-               break;
-       default:
-               return -EOPNOTSUPP;
-       }
-       if (err)
-               return err;
 
        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
                struct flow_match_basic match;
@@ -645,77 +607,82 @@ nfp_fl_pedit(const struct flow_action_entry *act,
                ip_proto = match.key->ip_proto;
        }
 
-       if (set_eth.head.len_lw) {
-               act_size = sizeof(set_eth);
-               memcpy(nfp_action, &set_eth, act_size);
+       if (set_act->set_eth.head.len_lw) {
+               act_size = sizeof(set_act->set_eth);
+               memcpy(nfp_action, &set_act->set_eth, act_size);
                *a_len += act_size;
        }
-       if (set_ip_ttl_tos.head.len_lw) {
+
+       if (set_act->set_ip_ttl_tos.head.len_lw) {
                nfp_action += act_size;
-               act_size = sizeof(set_ip_ttl_tos);
-               memcpy(nfp_action, &set_ip_ttl_tos, act_size);
+               act_size = sizeof(set_act->set_ip_ttl_tos);
+               memcpy(nfp_action, &set_act->set_ip_ttl_tos, act_size);
                *a_len += act_size;
 
                /* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
                *csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
                                nfp_fl_csum_l4_to_flag(ip_proto);
        }
-       if (set_ip_addr.head.len_lw) {
+
+       if (set_act->set_ip_addr.head.len_lw) {
                nfp_action += act_size;
-               act_size = sizeof(set_ip_addr);
-               memcpy(nfp_action, &set_ip_addr, act_size);
+               act_size = sizeof(set_act->set_ip_addr);
+               memcpy(nfp_action, &set_act->set_ip_addr, act_size);
                *a_len += act_size;
 
                /* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
                *csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
                                nfp_fl_csum_l4_to_flag(ip_proto);
        }
-       if (set_ip6_tc_hl_fl.head.len_lw) {
+
+       if (set_act->set_ip6_tc_hl_fl.head.len_lw) {
                nfp_action += act_size;
-               act_size = sizeof(set_ip6_tc_hl_fl);
-               memcpy(nfp_action, &set_ip6_tc_hl_fl, act_size);
+               act_size = sizeof(set_act->set_ip6_tc_hl_fl);
+               memcpy(nfp_action, &set_act->set_ip6_tc_hl_fl, act_size);
                *a_len += act_size;
 
                /* Hardware will automatically fix TCP/UDP checksum. */
                *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
        }
-       if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) {
+
+       if (set_act->set_ip6_dst.head.len_lw &&
+           set_act->set_ip6_src.head.len_lw) {
                /* TC compiles set src and dst IPv6 address as a single action,
                 * the hardware requires this to be 2 separate actions.
                 */
                nfp_action += act_size;
-               act_size = sizeof(set_ip6_src);
-               memcpy(nfp_action, &set_ip6_src, act_size);
+               act_size = sizeof(set_act->set_ip6_src);
+               memcpy(nfp_action, &set_act->set_ip6_src, act_size);
                *a_len += act_size;
 
-               act_size = sizeof(set_ip6_dst);
-               memcpy(&nfp_action[sizeof(set_ip6_src)], &set_ip6_dst,
-                      act_size);
+               act_size = sizeof(set_act->set_ip6_dst);
+               memcpy(&nfp_action[sizeof(set_act->set_ip6_src)],
+                      &set_act->set_ip6_dst, act_size);
                *a_len += act_size;
 
                /* Hardware will automatically fix TCP/UDP checksum. */
                *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
-       } else if (set_ip6_dst.head.len_lw) {
+       } else if (set_act->set_ip6_dst.head.len_lw) {
                nfp_action += act_size;
-               act_size = sizeof(set_ip6_dst);
-               memcpy(nfp_action, &set_ip6_dst, act_size);
+               act_size = sizeof(set_act->set_ip6_dst);
+               memcpy(nfp_action, &set_act->set_ip6_dst, act_size);
                *a_len += act_size;
 
                /* Hardware will automatically fix TCP/UDP checksum. */
                *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
-       } else if (set_ip6_src.head.len_lw) {
+       } else if (set_act->set_ip6_src.head.len_lw) {
                nfp_action += act_size;
-               act_size = sizeof(set_ip6_src);
-               memcpy(nfp_action, &set_ip6_src, act_size);
+               act_size = sizeof(set_act->set_ip6_src);
+               memcpy(nfp_action, &set_act->set_ip6_src, act_size);
                *a_len += act_size;
 
                /* Hardware will automatically fix TCP/UDP checksum. */
                *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
        }
-       if (set_tport.head.len_lw) {
+       if (set_act->set_tport.head.len_lw) {
                nfp_action += act_size;
-               act_size = sizeof(set_tport);
-               memcpy(nfp_action, &set_tport, act_size);
+               act_size = sizeof(set_act->set_tport);
+               memcpy(nfp_action, &set_act->set_tport, act_size);
                *a_len += act_size;
 
                /* Hardware will automatically fix TCP/UDP checksum. */
@@ -726,7 +693,40 @@ nfp_fl_pedit(const struct flow_action_entry *act,
 }
 
 static int
-nfp_flower_output_action(struct nfp_app *app, const struct flow_action_entry *act,
+nfp_fl_pedit(const struct flow_action_entry *act,
+            struct tc_cls_flower_offload *flow, char *nfp_action, int *a_len,
+            u32 *csum_updated, struct nfp_flower_pedit_acts *set_act)
+{
+       enum flow_action_mangle_base htype;
+       u32 offset;
+
+       htype = act->mangle.htype;
+       offset = act->mangle.offset;
+
+       switch (htype) {
+       case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
+               return nfp_fl_set_eth(act, offset, &set_act->set_eth);
+       case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
+               return nfp_fl_set_ip4(act, offset, &set_act->set_ip_addr,
+                                     &set_act->set_ip_ttl_tos);
+       case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
+               return nfp_fl_set_ip6(act, offset, &set_act->set_ip6_dst,
+                                     &set_act->set_ip6_src,
+                                     &set_act->set_ip6_tc_hl_fl);
+       case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
+               return nfp_fl_set_tport(act, offset, &set_act->set_tport,
+                                       NFP_FL_ACTION_OPCODE_SET_TCP);
+       case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
+               return nfp_fl_set_tport(act, offset, &set_act->set_tport,
+                                       NFP_FL_ACTION_OPCODE_SET_UDP);
+       default:
+               return -EOPNOTSUPP;
+       }
+}
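
The rewritten nfp_fl_pedit() no longer emits actions itself: each mangle key fills one member of the caller-owned nfp_flower_pedit_acts scratch struct, and emission is deferred to nfp_fl_commit_mangle(). A minimal userspace model of this accumulate-then-commit scheme (hypothetical types, not the driver's structs):

#include <stdio.h>
#include <string.h>

struct head { unsigned char len_lw; };	/* 0 means "not populated" */
struct scratch {
	struct head ttl_tos;
	struct head ip_addr;
};

int main(void)
{
	struct scratch s;

	memset(&s, 0, sizeof(s));	/* start of a mangle run */
	s.ip_addr.len_lw = 2;		/* one parsed IP4 address key */

	/* commit step: emit only the members a key populated */
	if (s.ttl_tos.len_lw)
		printf("emit ttl/tos action\n");
	if (s.ip_addr.len_lw)
		printf("emit ip addr action\n");
	return 0;
}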
+
+static int
+nfp_flower_output_action(struct nfp_app *app,
+                        const struct flow_action_entry *act,
                         struct nfp_fl_payload *nfp_fl, int *a_len,
                         struct net_device *netdev, bool last,
                         enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
@@ -776,7 +776,8 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
                       struct nfp_fl_payload *nfp_fl, int *a_len,
                       struct net_device *netdev,
                       enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
-                      int *out_cnt, u32 *csum_updated)
+                      int *out_cnt, u32 *csum_updated,
+                      struct nfp_flower_pedit_acts *set_act)
 {
        struct nfp_fl_set_ipv4_udp_tun *set_tun;
        struct nfp_fl_pre_tunnel *pre_tun;
@@ -861,7 +862,7 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
                return 0;
        case FLOW_ACTION_MANGLE:
                if (nfp_fl_pedit(act, flow, &nfp_fl->action_data[*a_len],
-                                a_len, csum_updated))
+                                a_len, csum_updated, set_act))
                        return -EOPNOTSUPP;
                break;
        case FLOW_ACTION_CSUM:
@@ -881,12 +882,49 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
        return 0;
 }
 
+static bool nfp_fl_check_mangle_start(struct flow_action *flow_act,
+                                     int current_act_idx)
+{
+       struct flow_action_entry current_act;
+       struct flow_action_entry prev_act;
+
+       current_act = flow_act->entries[current_act_idx];
+       if (current_act.id != FLOW_ACTION_MANGLE)
+               return false;
+
+       if (current_act_idx == 0)
+               return true;
+
+       prev_act = flow_act->entries[current_act_idx - 1];
+
+       return prev_act.id != FLOW_ACTION_MANGLE;
+}
+
+static bool nfp_fl_check_mangle_end(struct flow_action *flow_act,
+                                   int current_act_idx)
+{
+       struct flow_action_entry current_act;
+       struct flow_action_entry next_act;
+
+       current_act = flow_act->entries[current_act_idx];
+       if (current_act.id != FLOW_ACTION_MANGLE)
+               return false;
+
+       if (current_act_idx == flow_act->num_entries - 1)
+               return true;
+
+       next_act = flow_act->entries[current_act_idx + 1];
+
+       return next_act.id != FLOW_ACTION_MANGLE;
+}
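
A runnable userspace model of the two bracketing helpers above (hypothetical types, not the kernel's struct flow_action): a run of consecutive mangle entries yields exactly one start and one end, which nfp_flower_compile_action() below turns into one memset/commit cycle.

#include <stdbool.h>
#include <stdio.h>

enum { ACT_MANGLE, ACT_OUTPUT };

struct entry { int id; };
struct action { int num_entries; struct entry entries[8]; };

static bool mangle_start(const struct action *a, int i)
{
	if (a->entries[i].id != ACT_MANGLE)
		return false;
	return i == 0 || a->entries[i - 1].id != ACT_MANGLE;
}

static bool mangle_end(const struct action *a, int i)
{
	if (a->entries[i].id != ACT_MANGLE)
		return false;
	return i == a->num_entries - 1 || a->entries[i + 1].id != ACT_MANGLE;
}

int main(void)
{
	struct action a = { 4, { { ACT_MANGLE }, { ACT_MANGLE },
				 { ACT_OUTPUT }, { ACT_MANGLE } } };

	for (int i = 0; i < a.num_entries; i++)
		printf("idx %d: start=%d end=%d\n", i,
		       mangle_start(&a, i), mangle_end(&a, i));
	return 0;	/* indices 0 and 3 start a run; 1 and 3 end one */
}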
+
 int nfp_flower_compile_action(struct nfp_app *app,
                              struct tc_cls_flower_offload *flow,
                              struct net_device *netdev,
                              struct nfp_fl_payload *nfp_flow)
 {
        int act_len, act_cnt, err, tun_out_cnt, out_cnt, i;
+       struct nfp_flower_pedit_acts set_act;
        enum nfp_flower_tun_type tun_type;
        struct flow_action_entry *act;
        u32 csum_updated = 0;
@@ -900,12 +938,18 @@ int nfp_flower_compile_action(struct nfp_app *app,
        out_cnt = 0;
 
        flow_action_for_each(i, act, &flow->rule->action) {
+               if (nfp_fl_check_mangle_start(&flow->rule->action, i))
+                       memset(&set_act, 0, sizeof(set_act));
                err = nfp_flower_loop_action(app, act, flow, nfp_flow, &act_len,
                                             netdev, &tun_type, &tun_out_cnt,
-                                            &out_cnt, &csum_updated);
+                                            &out_cnt, &csum_updated, &set_act);
                if (err)
                        return err;
                act_cnt++;
+               if (nfp_fl_check_mangle_end(&flow->rule->action, i))
+                       nfp_fl_commit_mangle(flow,
+                                            &nfp_flow->action_data[act_len],
+                                            &act_len, &set_act, &csum_updated);
        }
 
        /* We optimise when the action list is small, this can unfortunately
index cf9e1118ee8fff37301de7ef4160ab3b99cc2f17..2054a2f0bbc414a1fc59856deba69159ff189067 100644 (file)
@@ -159,7 +159,7 @@ nfp_flower_cmsg_portmod_rx(struct nfp_app *app, struct sk_buff *skb)
 
        rtnl_lock();
        rcu_read_lock();
-       netdev = nfp_app_repr_get(app, be32_to_cpu(msg->portnum));
+       netdev = nfp_app_dev_get(app, be32_to_cpu(msg->portnum), NULL);
        rcu_read_unlock();
        if (!netdev) {
                nfp_flower_cmsg_warn(app, "ctrl msg for unknown port 0x%08x\n",
@@ -192,7 +192,7 @@ nfp_flower_cmsg_portreify_rx(struct nfp_app *app, struct sk_buff *skb)
        msg = nfp_flower_cmsg_get_data(skb);
 
        rcu_read_lock();
-       exists = !!nfp_app_repr_get(app, be32_to_cpu(msg->portnum));
+       exists = !!nfp_app_dev_get(app, be32_to_cpu(msg->portnum), NULL);
        rcu_read_unlock();
        if (!exists) {
                nfp_flower_cmsg_warn(app, "ctrl msg for unknown port 0x%08x\n",
@@ -204,6 +204,50 @@ nfp_flower_cmsg_portreify_rx(struct nfp_app *app, struct sk_buff *skb)
        wake_up(&priv->reify_wait_queue);
 }
 
+static void
+nfp_flower_cmsg_merge_hint_rx(struct nfp_app *app, struct sk_buff *skb)
+{
+       unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb);
+       struct nfp_flower_cmsg_merge_hint *msg;
+       struct nfp_fl_payload *sub_flows[2];
+       int err, i, flow_cnt;
+
+       msg = nfp_flower_cmsg_get_data(skb);
+       /* msg->count is zero-based, so it always implies at least 1 entry. */
+       flow_cnt = msg->count + 1;
+
+       if (msg_len < struct_size(msg, flow, flow_cnt)) {
+               nfp_flower_cmsg_warn(app, "Merge hint ctrl msg too short - %d bytes but expect %ld\n",
+                                    msg_len, struct_size(msg, flow, flow_cnt));
+               return;
+       }
+
+       if (flow_cnt != 2) {
+               nfp_flower_cmsg_warn(app, "Merge hint contains %d flows - two are expected\n",
+                                    flow_cnt);
+               return;
+       }
+
+       rtnl_lock();
+       for (i = 0; i < flow_cnt; i++) {
+               u32 ctx = be32_to_cpu(msg->flow[i].host_ctx);
+
+               sub_flows[i] = nfp_flower_get_fl_payload_from_ctx(app, ctx);
+               if (!sub_flows[i]) {
+                       nfp_flower_cmsg_warn(app, "Invalid flow in merge hint\n");
+                       goto err_rtnl_unlock;
+               }
+       }
+
+       err = nfp_flower_merge_offloaded_flows(app, sub_flows[0], sub_flows[1]);
+       /* Only warn on memory fail. Hint veto will not break functionality. */
+       if (err == -ENOMEM)
+               nfp_flower_cmsg_warn(app, "Flow merge memory fail.\n");
+
+err_rtnl_unlock:
+       rtnl_unlock();
+}
+
 static void
 nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
 {
@@ -222,6 +266,12 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
        case NFP_FLOWER_CMSG_TYPE_PORT_MOD:
                nfp_flower_cmsg_portmod_rx(app, skb);
                break;
+       case NFP_FLOWER_CMSG_TYPE_MERGE_HINT:
+               if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MERGE) {
+                       nfp_flower_cmsg_merge_hint_rx(app, skb);
+                       break;
+               }
+               goto err_default;
        case NFP_FLOWER_CMSG_TYPE_NO_NEIGH:
                nfp_tunnel_request_route(app, skb);
                break;
@@ -235,6 +285,7 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
                }
                /* fall through */
        default:
+err_default:
                nfp_flower_cmsg_warn(app, "Cannot handle invalid repr control type %u\n",
                                     type);
                goto out;
index 4fcaf11ed56ed6f2ee89dde5313962676fba9f92..a10c29ade5c208e58ff4e3ecd0eb9ee0e9136fe7 100644 (file)
@@ -26,7 +26,7 @@
 #define NFP_FLOWER_LAYER2_GENEVE_OP    BIT(6)
 
 #define NFP_FLOWER_MASK_VLAN_PRIO      GENMASK(15, 13)
-#define NFP_FLOWER_MASK_VLAN_CFI       BIT(12)
+#define NFP_FLOWER_MASK_VLAN_PRESENT   BIT(12)
 #define NFP_FLOWER_MASK_VLAN_VID       GENMASK(11, 0)
 
 #define NFP_FLOWER_MASK_MPLS_LB                GENMASK(31, 12)
@@ -82,7 +82,6 @@
 #define NFP_FL_OUT_FLAGS_TYPE_IDX      GENMASK(2, 0)
 
 #define NFP_FL_PUSH_VLAN_PRIO          GENMASK(15, 13)
-#define NFP_FL_PUSH_VLAN_CFI           BIT(12)
 #define NFP_FL_PUSH_VLAN_VID           GENMASK(11, 0)
 
 #define IPV6_FLOW_LABEL_MASK           cpu_to_be32(0x000fffff)
@@ -403,11 +402,13 @@ struct nfp_flower_cmsg_hdr {
 /* Types defined for port related control messages  */
 enum nfp_flower_cmsg_type_port {
        NFP_FLOWER_CMSG_TYPE_FLOW_ADD =         0,
+       NFP_FLOWER_CMSG_TYPE_FLOW_MOD =         1,
        NFP_FLOWER_CMSG_TYPE_FLOW_DEL =         2,
        NFP_FLOWER_CMSG_TYPE_LAG_CONFIG =       4,
        NFP_FLOWER_CMSG_TYPE_PORT_REIFY =       6,
        NFP_FLOWER_CMSG_TYPE_MAC_REPR =         7,
        NFP_FLOWER_CMSG_TYPE_PORT_MOD =         8,
+       NFP_FLOWER_CMSG_TYPE_MERGE_HINT =       9,
        NFP_FLOWER_CMSG_TYPE_NO_NEIGH =         10,
        NFP_FLOWER_CMSG_TYPE_TUN_MAC =          11,
        NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS =      12,
@@ -452,6 +453,16 @@ struct nfp_flower_cmsg_portreify {
 
 #define NFP_FLOWER_CMSG_PORTREIFY_INFO_EXIST   BIT(0)
 
+/* NFP_FLOWER_CMSG_TYPE_FLOW_MERGE_HINT */
+struct nfp_flower_cmsg_merge_hint {
+       u8 reserved[3];
+       u8 count;
+       struct {
+               __be32 host_ctx;
+               __be64 host_cookie;
+       } __packed flow[0];
+};
+
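The zero-based count and the struct_size() bound checked in nfp_flower_cmsg_merge_hint_rx() above can be reproduced with plain offsetof arithmetic. A standalone model (local types mirroring the struct above):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct merge_hint {
	uint8_t reserved[3];
	uint8_t count;			/* zero-based: 0 means one entry */
	struct {
		uint32_t host_ctx;
		uint64_t host_cookie;
	} __attribute__((packed)) flow[];
};

int main(void)
{
	int flow_cnt = 0 + 1;		/* msg->count == 0 */
	size_t need = offsetof(struct merge_hint, flow) +
		      flow_cnt * sizeof(((struct merge_hint *)0)->flow[0]);

	printf("minimum msg_len for one entry: %zu bytes\n", need);
	return 0;			/* prints 16 */
}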
 enum nfp_flower_cmsg_port_type {
        NFP_FLOWER_CMSG_PORT_TYPE_UNSPEC =      0x0,
        NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT =   0x1,
@@ -474,6 +485,13 @@ enum nfp_flower_cmsg_port_vnic_type {
 #define NFP_FLOWER_CMSG_PORT_PCIE_Q            GENMASK(5, 0)
 #define NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM     GENMASK(7, 0)
 
+static inline u32 nfp_flower_internal_port_get_port_id(u8 internal_port)
+{
+       return FIELD_PREP(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM, internal_port) |
+               FIELD_PREP(NFP_FLOWER_CMSG_PORT_TYPE,
+                          NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT);
+}
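
nfp_flower_dev_get() later reverses this encoding with FIELD_GET to spot internal ports. A userspace round trip of the same idea; the shift and type value below are illustrative assumptions, not the firmware ABI:

#include <stdint.h>
#include <stdio.h>

#define TYPE_SHIFT	28		/* assumed position of the type field */
#define TYPE_MASK	(0xfu << TYPE_SHIFT)
#define NUM_MASK	0xffu		/* GENMASK(7, 0), as above */
#define TYPE_OTHER	0x4u		/* assumed ..._TYPE_OTHER_PORT value */

static uint32_t encode(uint8_t internal_port)
{
	return (TYPE_OTHER << TYPE_SHIFT) | internal_port;
}

int main(void)
{
	uint32_t id = encode(7);

	printf("type=%u port=%u\n",
	       (id & TYPE_MASK) >> TYPE_SHIFT, id & NUM_MASK);
	return 0;			/* prints type=4 port=7 */
}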
+
 static inline u32 nfp_flower_cmsg_phys_port(u8 phys_port)
 {
        return FIELD_PREP(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM, phys_port) |
index 408089133599b55d686f8b876f43e2379e71ccd3..d476917c8f7d7458b2a30b878ce2b7846f270c37 100644 (file)
@@ -22,6 +22,9 @@
 
 #define NFP_FLOWER_ALLOWED_VER 0x0001000000010000UL
 
+#define NFP_MIN_INT_PORT_ID    1
+#define NFP_MAX_INT_PORT_ID    256
+
 static const char *nfp_flower_extra_cap(struct nfp_app *app, struct nfp_net *nn)
 {
        return "FLOWER";
@@ -32,6 +35,113 @@ static enum devlink_eswitch_mode eswitch_mode_get(struct nfp_app *app)
        return DEVLINK_ESWITCH_MODE_SWITCHDEV;
 }
 
+static int
+nfp_flower_lookup_internal_port_id(struct nfp_flower_priv *priv,
+                                  struct net_device *netdev)
+{
+       struct net_device *entry;
+       int i, id = 0;
+
+       rcu_read_lock();
+       idr_for_each_entry(&priv->internal_ports.port_ids, entry, i)
+               if (entry == netdev) {
+                       id = i;
+                       break;
+               }
+       rcu_read_unlock();
+
+       return id;
+}
+
+static int
+nfp_flower_get_internal_port_id(struct nfp_app *app, struct net_device *netdev)
+{
+       struct nfp_flower_priv *priv = app->priv;
+       int id;
+
+       id = nfp_flower_lookup_internal_port_id(priv, netdev);
+       if (id > 0)
+               return id;
+
+       idr_preload(GFP_ATOMIC);
+       spin_lock_bh(&priv->internal_ports.lock);
+       id = idr_alloc(&priv->internal_ports.port_ids, netdev,
+                      NFP_MIN_INT_PORT_ID, NFP_MAX_INT_PORT_ID, GFP_ATOMIC);
+       spin_unlock_bh(&priv->internal_ports.lock);
+       idr_preload_end();
+
+       return id;
+}
+
+u32 nfp_flower_get_port_id_from_netdev(struct nfp_app *app,
+                                      struct net_device *netdev)
+{
+       int ext_port;
+
+       if (nfp_netdev_is_nfp_repr(netdev)) {
+               return nfp_repr_get_port_id(netdev);
+       } else if (nfp_flower_internal_port_can_offload(app, netdev)) {
+               ext_port = nfp_flower_get_internal_port_id(app, netdev);
+               if (ext_port < 0)
+                       return 0;
+
+               return nfp_flower_internal_port_get_port_id(ext_port);
+       }
+
+       return 0;
+}
+
+static struct net_device *
+nfp_flower_get_netdev_from_internal_port_id(struct nfp_app *app, int port_id)
+{
+       struct nfp_flower_priv *priv = app->priv;
+       struct net_device *netdev;
+
+       rcu_read_lock();
+       netdev = idr_find(&priv->internal_ports.port_ids, port_id);
+       rcu_read_unlock();
+
+       return netdev;
+}
+
+static void
+nfp_flower_free_internal_port_id(struct nfp_app *app, struct net_device *netdev)
+{
+       struct nfp_flower_priv *priv = app->priv;
+       int id;
+
+       id = nfp_flower_lookup_internal_port_id(priv, netdev);
+       if (!id)
+               return;
+
+       spin_lock_bh(&priv->internal_ports.lock);
+       idr_remove(&priv->internal_ports.port_ids, id);
+       spin_unlock_bh(&priv->internal_ports.lock);
+}
+
+static int
+nfp_flower_internal_port_event_handler(struct nfp_app *app,
+                                      struct net_device *netdev,
+                                      unsigned long event)
+{
+       if (event == NETDEV_UNREGISTER &&
+           nfp_flower_internal_port_can_offload(app, netdev))
+               nfp_flower_free_internal_port_id(app, netdev);
+
+       return NOTIFY_OK;
+}
+
+static void nfp_flower_internal_port_init(struct nfp_flower_priv *priv)
+{
+       spin_lock_init(&priv->internal_ports.lock);
+       idr_init(&priv->internal_ports.port_ids);
+}
+
+static void nfp_flower_internal_port_cleanup(struct nfp_flower_priv *priv)
+{
+       idr_destroy(&priv->internal_ports.port_ids);
+}
+
 static struct nfp_flower_non_repr_priv *
 nfp_flower_non_repr_priv_lookup(struct nfp_app *app, struct net_device *netdev)
 {
@@ -119,12 +229,21 @@ nfp_flower_repr_get_type_and_port(struct nfp_app *app, u32 port_id, u8 *port)
 }
 
 static struct net_device *
-nfp_flower_repr_get(struct nfp_app *app, u32 port_id)
+nfp_flower_dev_get(struct nfp_app *app, u32 port_id, bool *redir_egress)
 {
        enum nfp_repr_type repr_type;
        struct nfp_reprs *reprs;
        u8 port = 0;
 
+       /* Check if the port is internal. */
+       if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port_id) ==
+           NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT) {
+               if (redir_egress)
+                       *redir_egress = true;
+               port = FIELD_GET(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM, port_id);
+               return nfp_flower_get_netdev_from_internal_port_id(app, port);
+       }
+
        repr_type = nfp_flower_repr_get_type_and_port(app, port_id, &port);
        if (repr_type > NFP_REPR_TYPE_MAX)
                return NULL;
@@ -641,11 +760,30 @@ static int nfp_flower_init(struct nfp_app *app)
                goto err_cleanup_metadata;
        }
 
+       if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MOD) {
+               /* Tell the firmware that the driver supports flow merging. */
+               err = nfp_rtsym_write_le(app->pf->rtbl,
+                                        "_abi_flower_merge_hint_enable", 1);
+               if (!err) {
+                       app_priv->flower_ext_feats |= NFP_FL_FEATS_FLOW_MERGE;
+                       nfp_flower_internal_port_init(app_priv);
+               } else if (err == -ENOENT) {
+                       nfp_warn(app->cpp, "Flow merge not supported by FW.\n");
+               } else {
+                       goto err_lag_clean;
+               }
+       } else {
+               nfp_warn(app->cpp, "Flow mod/merge not supported by FW.\n");
+       }
+
        INIT_LIST_HEAD(&app_priv->indr_block_cb_priv);
        INIT_LIST_HEAD(&app_priv->non_repr_priv);
 
        return 0;
 
+err_lag_clean:
+       if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG)
+               nfp_flower_lag_cleanup(&app_priv->nfp_lag);
 err_cleanup_metadata:
        nfp_flower_metadata_cleanup(app);
 err_free_app_priv:
@@ -664,6 +802,9 @@ static void nfp_flower_clean(struct nfp_app *app)
        if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG)
                nfp_flower_lag_cleanup(&app_priv->nfp_lag);
 
+       if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MERGE)
+               nfp_flower_internal_port_cleanup(app_priv);
+
        nfp_flower_metadata_cleanup(app);
        vfree(app->priv);
        app->priv = NULL;
@@ -762,6 +903,10 @@ nfp_flower_netdev_event(struct nfp_app *app, struct net_device *netdev,
        if (ret & NOTIFY_STOP_MASK)
                return ret;
 
+       ret = nfp_flower_internal_port_event_handler(app, netdev, event);
+       if (ret & NOTIFY_STOP_MASK)
+               return ret;
+
        return nfp_tunnel_mac_event_handler(app, netdev, event, ptr);
 }
 
@@ -800,7 +945,7 @@ const struct nfp_app_type app_flower = {
        .sriov_disable  = nfp_flower_sriov_disable,
 
        .eswitch_mode_get  = eswitch_mode_get,
-       .repr_get       = nfp_flower_repr_get,
+       .dev_get        = nfp_flower_dev_get,
 
        .setup_tc       = nfp_flower_setup_tc,
 };
index f6ca8dc9cc9254794ba8a20b9d5bc58d1f3d800a..675f43f06526c86144641d8dc3c0c717a451edcb 100644 (file)
@@ -39,6 +39,8 @@ struct nfp_app;
 #define NFP_FL_NBI_MTU_SETTING         BIT(1)
 #define NFP_FL_FEATS_GENEVE_OPT                BIT(2)
 #define NFP_FL_FEATS_VLAN_PCP          BIT(3)
+#define NFP_FL_FEATS_FLOW_MOD          BIT(5)
+#define NFP_FL_FEATS_FLOW_MERGE                BIT(30)
 #define NFP_FL_FEATS_LAG               BIT(31)
 
 struct nfp_fl_mask_id {
@@ -114,6 +116,16 @@ struct nfp_fl_lag {
        struct sk_buff_head retrans_skbs;
 };
 
+/**
+ * struct nfp_fl_internal_ports - Flower APP priv data for additional ports
+ * @port_ids:  Assignment of ids to any additional ports
+ * @lock:      Lock for extra ports list
+ */
+struct nfp_fl_internal_ports {
+       struct idr port_ids;
+       spinlock_t lock;
+};
+
 /**
  * struct nfp_flower_priv - Flower APP per-vNIC priv data
  * @app:               Back pointer to app
@@ -128,6 +140,7 @@ struct nfp_fl_lag {
  * @flow_table:                Hash table used to store flower rules
  * @stats:             Stored stats updates for flower rules
  * @stats_lock:                Lock for flower rule stats updates
+ * @stats_ctx_table:   Hash table to map a stats context to its flow rule
  * @cmsg_work:         Workqueue for control messages processing
  * @cmsg_skbs_high:    List of higher priority skbs for control message
  *                     processing
@@ -143,6 +156,7 @@ struct nfp_fl_lag {
  * @non_repr_priv:     List of offloaded non-repr ports and their priv data
  * @active_mem_unit:   Current active memory unit for flower rules
  * @total_mem_units:   Total number of available memory units for flower rules
+ * @internal_ports:    Internal port ids used in offloaded rules
  */
 struct nfp_flower_priv {
        struct nfp_app *app;
@@ -157,6 +171,7 @@ struct nfp_flower_priv {
        struct rhashtable flow_table;
        struct nfp_fl_stats *stats;
        spinlock_t stats_lock; /* lock stats */
+       struct rhashtable stats_ctx_table;
        struct work_struct cmsg_work;
        struct sk_buff_head cmsg_skbs_high;
        struct sk_buff_head cmsg_skbs_low;
@@ -169,6 +184,7 @@ struct nfp_flower_priv {
        struct list_head non_repr_priv;
        unsigned int active_mem_unit;
        unsigned int total_mem_units;
+       struct nfp_fl_internal_ports internal_ports;
 };
 
 /**
@@ -236,6 +252,25 @@ struct nfp_fl_payload {
        char *unmasked_data;
        char *mask_data;
        char *action_data;
+       struct list_head linked_flows;
+       bool in_hw;
+};
+
+struct nfp_fl_payload_link {
+       /* A link contains a pointer to a merge flow and an associated sub_flow.
+        * Each merge flow will feature in 2 links to its underlying sub_flows.
+        * A sub_flow will have one link for each merge flow it has been
+        * used to create, so at least 1 and possibly several.
+        *
+        * For a merge flow, 'linked_flows' in its nfp_fl_payload struct lists
+        * all links to sub_flows (sub_flow.flow) via merge.list.
+        * For a sub_flow, 'linked_flows' gives all links to merge flows it has
+        * formed (merge_flow.flow) via sub_flow.list.
+        */
+       struct {
+               struct list_head list;
+               struct nfp_fl_payload *flow;
+       } merge_flow, sub_flow;
+};
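
A kernel-context sketch (not a standalone program) of the two traversals this comment describes, mirroring nfp_flower_unlink_flows() and nfp_flower_del_linked_merge_flows() elsewhere in the patch; it assumes the two structs above are in scope:

#include <linux/list.h>
#include <linux/printk.h>

static void walk_sub_flow_links(struct nfp_fl_payload *sub_flow)
{
	struct nfp_fl_payload_link *link;

	/* every merge flow this sub-flow has helped form */
	list_for_each_entry(link, &sub_flow->linked_flows, sub_flow.list)
		pr_debug("merge flow %p\n", link->merge_flow.flow);
}

static void walk_merge_flow_links(struct nfp_fl_payload *merge_flow)
{
	struct nfp_fl_payload_link *link;

	/* both sub-flows that formed this merge flow */
	list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list)
		pr_debug("sub flow %p\n", link->sub_flow.flow);
}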
 
 extern const struct rhashtable_params nfp_flower_table_params;
@@ -247,12 +282,40 @@ struct nfp_fl_stats_frame {
        __be64 stats_cookie;
 };
 
+static inline bool
+nfp_flower_internal_port_can_offload(struct nfp_app *app,
+                                    struct net_device *netdev)
+{
+       struct nfp_flower_priv *app_priv = app->priv;
+
+       if (!(app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MERGE))
+               return false;
+       if (!netdev->rtnl_link_ops)
+               return false;
+       if (!strcmp(netdev->rtnl_link_ops->kind, "openvswitch"))
+               return true;
+
+       return false;
+}
+
+/* The address of the merged flow acts as its cookie.
+ * Cookies supplied to us by TC flower are also addresses of allocated
+ * memory and thus this scheme should not generate any collisions.
+ */
+static inline bool nfp_flower_is_merge_flow(struct nfp_fl_payload *flow_pay)
+{
+       return flow_pay->tc_flower_cookie == (unsigned long)flow_pay;
+}
+
 int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
                             unsigned int host_ctx_split);
 void nfp_flower_metadata_cleanup(struct nfp_app *app);
 
 int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
                        enum tc_setup_type type, void *type_data);
+int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
+                                    struct nfp_fl_payload *sub_flow1,
+                                    struct nfp_fl_payload *sub_flow2);
 int nfp_flower_compile_flow_match(struct nfp_app *app,
                                  struct tc_cls_flower_offload *flow,
                                  struct nfp_fl_key_ls *key_ls,
@@ -267,6 +330,8 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
                              struct tc_cls_flower_offload *flow,
                              struct nfp_fl_payload *nfp_flow,
                              struct net_device *netdev);
+void __nfp_modify_flow_metadata(struct nfp_flower_priv *priv,
+                               struct nfp_fl_payload *nfp_flow);
 int nfp_modify_flow_metadata(struct nfp_app *app,
                             struct nfp_fl_payload *nfp_flow);
 
@@ -274,6 +339,8 @@ struct nfp_fl_payload *
 nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
                           struct net_device *netdev);
 struct nfp_fl_payload *
+nfp_flower_get_fl_payload_from_ctx(struct nfp_app *app, u32 ctx_id);
+struct nfp_fl_payload *
 nfp_flower_remove_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie);
 
 void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb);
@@ -311,4 +378,6 @@ void
 __nfp_flower_non_repr_priv_put(struct nfp_flower_non_repr_priv *non_repr_priv);
 void
 nfp_flower_non_repr_priv_put(struct nfp_app *app, struct net_device *netdev);
+u32 nfp_flower_get_port_id_from_netdev(struct nfp_app *app,
+                                      struct net_device *netdev);
 #endif
index e03c8ef2c28c525b7fb44b4ce8f3e294d3d4fef6..bfa4bf34911daf87ba27d2c47ed49e759fb55143 100644 (file)
@@ -30,20 +30,19 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
 
                flow_rule_match_vlan(rule, &match);
                /* Populate the tci field. */
-               if (match.key->vlan_id || match.key->vlan_priority) {
-                       tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
-                                            match.key->vlan_priority) |
-                                 FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
-                                            match.key->vlan_id) |
-                                 NFP_FLOWER_MASK_VLAN_CFI;
-                       ext->tci = cpu_to_be16(tmp_tci);
-                       tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
-                                            match.mask->vlan_priority) |
-                                 FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
-                                            match.mask->vlan_id) |
-                                 NFP_FLOWER_MASK_VLAN_CFI;
-                       msk->tci = cpu_to_be16(tmp_tci);
-               }
+               tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
+               tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
+                                     match.key->vlan_priority) |
+                          FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
+                                     match.key->vlan_id);
+               ext->tci = cpu_to_be16(tmp_tci);
+
+               tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
+               tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
+                                     match.mask->vlan_priority) |
+                          FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
+                                     match.mask->vlan_id);
+               msk->tci = cpu_to_be16(tmp_tci);
        }
 }
 
@@ -327,13 +326,12 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
                                  struct nfp_fl_payload *nfp_flow,
                                  enum nfp_flower_tun_type tun_type)
 {
-       u32 cmsg_port = 0;
+       u32 port_id;
        int err;
        u8 *ext;
        u8 *msk;
 
-       if (nfp_netdev_is_nfp_repr(netdev))
-               cmsg_port = nfp_repr_get_port_id(netdev);
+       port_id = nfp_flower_get_port_id_from_netdev(app, netdev);
 
        memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
        memset(nfp_flow->mask_data, 0, key_ls->key_size);
@@ -359,13 +357,13 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
 
        /* Populate Exact Port data. */
        err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
-                                     cmsg_port, false, tun_type);
+                                     port_id, false, tun_type);
        if (err)
                return err;
 
        /* Populate Mask Port Data. */
        err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
-                                     cmsg_port, true, tun_type);
+                                     port_id, true, tun_type);
        if (err)
                return err;
 
index 492837b852b6d3652123cc187d24dbf68bed6a19..3d326efdc814f1b50efc8913ab252e91a2669924 100644 (file)
@@ -24,6 +24,18 @@ struct nfp_fl_flow_table_cmp_arg {
        unsigned long cookie;
 };
 
+struct nfp_fl_stats_ctx_to_flow {
+       struct rhash_head ht_node;
+       u32 stats_cxt;
+       struct nfp_fl_payload *flow;
+};
+
+static const struct rhashtable_params stats_ctx_table_params = {
+       .key_offset     = offsetof(struct nfp_fl_stats_ctx_to_flow, stats_cxt),
+       .head_offset    = offsetof(struct nfp_fl_stats_ctx_to_flow, ht_node),
+       .key_len        = sizeof(u32),
+};
+
 static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id)
 {
        struct nfp_flower_priv *priv = app->priv;
@@ -264,9 +276,6 @@ nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len,
        if (!mask_entry)
                return false;
 
-       if (meta_flags)
-               *meta_flags &= ~NFP_FL_META_FLAG_MANAGE_MASK;
-
        *mask_id = mask_entry->mask_id;
        mask_entry->ref_cnt--;
        if (!mask_entry->ref_cnt) {
@@ -285,25 +294,42 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
                              struct nfp_fl_payload *nfp_flow,
                              struct net_device *netdev)
 {
+       struct nfp_fl_stats_ctx_to_flow *ctx_entry;
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_fl_payload *check_entry;
        u8 new_mask_id;
        u32 stats_cxt;
+       int err;
 
-       if (nfp_get_stats_entry(app, &stats_cxt))
-               return -ENOENT;
+       err = nfp_get_stats_entry(app, &stats_cxt);
+       if (err)
+               return err;
 
        nfp_flow->meta.host_ctx_id = cpu_to_be32(stats_cxt);
        nfp_flow->meta.host_cookie = cpu_to_be64(flow->cookie);
        nfp_flow->ingress_dev = netdev;
 
+       ctx_entry = kzalloc(sizeof(*ctx_entry), GFP_KERNEL);
+       if (!ctx_entry) {
+               err = -ENOMEM;
+               goto err_release_stats;
+       }
+
+       ctx_entry->stats_cxt = stats_cxt;
+       ctx_entry->flow = nfp_flow;
+
+       if (rhashtable_insert_fast(&priv->stats_ctx_table, &ctx_entry->ht_node,
+                                  stats_ctx_table_params)) {
+               err = -ENOMEM;
+               goto err_free_ctx_entry;
+       }
+
        new_mask_id = 0;
        if (!nfp_check_mask_add(app, nfp_flow->mask_data,
                                nfp_flow->meta.mask_len,
                                &nfp_flow->meta.flags, &new_mask_id)) {
-               if (nfp_release_stats_entry(app, stats_cxt))
-                       return -EINVAL;
-               return -ENOENT;
+               err = -ENOENT;
+               goto err_remove_rhash;
        }
 
        nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
@@ -317,43 +343,82 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
 
        check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev);
        if (check_entry) {
-               if (nfp_release_stats_entry(app, stats_cxt))
-                       return -EINVAL;
-
-               if (!nfp_check_mask_remove(app, nfp_flow->mask_data,
-                                          nfp_flow->meta.mask_len,
-                                          NULL, &new_mask_id))
-                       return -EINVAL;
-
-               return -EEXIST;
+               err = -EEXIST;
+               goto err_remove_mask;
        }
 
        return 0;
+
+err_remove_mask:
+       nfp_check_mask_remove(app, nfp_flow->mask_data, nfp_flow->meta.mask_len,
+                             NULL, &new_mask_id);
+err_remove_rhash:
+       WARN_ON_ONCE(rhashtable_remove_fast(&priv->stats_ctx_table,
+                                           &ctx_entry->ht_node,
+                                           stats_ctx_table_params));
+err_free_ctx_entry:
+       kfree(ctx_entry);
+err_release_stats:
+       nfp_release_stats_entry(app, stats_cxt);
+
+       return err;
+}
+
+void __nfp_modify_flow_metadata(struct nfp_flower_priv *priv,
+                               struct nfp_fl_payload *nfp_flow)
+{
+       nfp_flow->meta.flags &= ~NFP_FL_META_FLAG_MANAGE_MASK;
+       nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
+       priv->flower_version++;
 }
 
 int nfp_modify_flow_metadata(struct nfp_app *app,
                             struct nfp_fl_payload *nfp_flow)
 {
+       struct nfp_fl_stats_ctx_to_flow *ctx_entry;
        struct nfp_flower_priv *priv = app->priv;
        u8 new_mask_id = 0;
        u32 temp_ctx_id;
 
+       __nfp_modify_flow_metadata(priv, nfp_flow);
+
        nfp_check_mask_remove(app, nfp_flow->mask_data,
                              nfp_flow->meta.mask_len, &nfp_flow->meta.flags,
                              &new_mask_id);
 
-       nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
-       priv->flower_version++;
-
        /* Update flow payload with mask ids. */
        nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;
 
-       /* Release the stats ctx id. */
+       /* Release the stats ctx id and ctx to flow table entry. */
        temp_ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
 
+       ctx_entry = rhashtable_lookup_fast(&priv->stats_ctx_table, &temp_ctx_id,
+                                          stats_ctx_table_params);
+       if (!ctx_entry)
+               return -ENOENT;
+
+       WARN_ON_ONCE(rhashtable_remove_fast(&priv->stats_ctx_table,
+                                           &ctx_entry->ht_node,
+                                           stats_ctx_table_params));
+       kfree(ctx_entry);
+
        return nfp_release_stats_entry(app, temp_ctx_id);
 }
 
+struct nfp_fl_payload *
+nfp_flower_get_fl_payload_from_ctx(struct nfp_app *app, u32 ctx_id)
+{
+       struct nfp_fl_stats_ctx_to_flow *ctx_entry;
+       struct nfp_flower_priv *priv = app->priv;
+
+       ctx_entry = rhashtable_lookup_fast(&priv->stats_ctx_table, &ctx_id,
+                                          stats_ctx_table_params);
+       if (!ctx_entry)
+               return NULL;
+
+       return ctx_entry->flow;
+}
+
 static int nfp_fl_obj_cmpfn(struct rhashtable_compare_arg *arg,
                            const void *obj)
 {
@@ -403,6 +468,10 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
        if (err)
                return err;
 
+       err = rhashtable_init(&priv->stats_ctx_table, &stats_ctx_table_params);
+       if (err)
+               goto err_free_flow_table;
+
        get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed));
 
        /* Init ring buffer and unallocated mask_ids. */
@@ -410,7 +479,7 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
                kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
                              NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL);
        if (!priv->mask_ids.mask_id_free_list.buf)
-               goto err_free_flow_table;
+               goto err_free_stats_ctx_table;
 
        priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1;
 
@@ -447,6 +516,8 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
        kfree(priv->mask_ids.last_used);
 err_free_mask_id:
        kfree(priv->mask_ids.mask_id_free_list.buf);
+err_free_stats_ctx_table:
+       rhashtable_destroy(&priv->stats_ctx_table);
 err_free_flow_table:
        rhashtable_destroy(&priv->flow_table);
        return -ENOMEM;
@@ -461,6 +532,8 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app)
 
        rhashtable_free_and_destroy(&priv->flow_table,
                                    nfp_check_rhashtable_empty, NULL);
+       rhashtable_free_and_destroy(&priv->stats_ctx_table,
+                                   nfp_check_rhashtable_empty, NULL);
        kvfree(priv->stats);
        kfree(priv->mask_ids.mask_id_free_list.buf);
        kfree(priv->mask_ids.last_used);
index 9f16920da81dd187d121c9930e6917a995278b3f..aefe211da82c9e2d6884daaead31a1b45a29024b 100644 (file)
         BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))
 
+#define NFP_FLOWER_MERGE_FIELDS \
+       (NFP_FLOWER_LAYER_PORT | \
+        NFP_FLOWER_LAYER_MAC | \
+        NFP_FLOWER_LAYER_TP | \
+        NFP_FLOWER_LAYER_IPV4 | \
+        NFP_FLOWER_LAYER_IPV6)
+
+struct nfp_flower_merge_check {
+       union {
+               struct {
+                       __be16 tci;
+                       struct nfp_flower_mac_mpls l2;
+                       struct nfp_flower_tp_ports l4;
+                       union {
+                               struct nfp_flower_ipv4 ipv4;
+                               struct nfp_flower_ipv6 ipv6;
+                       };
+               };
+               unsigned long vals[8];
+       };
+};
+
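The vals[8] overlay is what lets nfp_flower_can_merge() below treat the whole match as a single bitmap. A compile-time guard along these lines (hypothetical, not part of the patch) would document the size assumption:

#include <linux/bug.h>

static inline void nfp_flower_merge_check_size(void)
{
	/* the anonymous struct and vals[] must cover the same bytes */
	BUILD_BUG_ON(sizeof(struct nfp_flower_merge_check) !=
		     sizeof(((struct nfp_flower_merge_check *)0)->vals));
}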
 static int
 nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
                     u8 mtype)
@@ -326,7 +348,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
                                break;
 
                        case cpu_to_be16(ETH_P_IPV6):
                                key_layer |= NFP_FLOWER_LAYER_IPV6;
                                key_size += sizeof(struct nfp_flower_ipv6);
                                break;
 
@@ -376,6 +398,8 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
 
        flow_pay->nfp_tun_ipv4_addr = 0;
        flow_pay->meta.flags = 0;
+       INIT_LIST_HEAD(&flow_pay->linked_flows);
+       flow_pay->in_hw = false;
 
        return flow_pay;
 
@@ -388,6 +412,447 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
        return NULL;
 }
 
+static int
+nfp_flower_update_merge_with_actions(struct nfp_fl_payload *flow,
+                                    struct nfp_flower_merge_check *merge,
+                                    u8 *last_act_id, int *act_out)
+{
+       struct nfp_fl_set_ipv6_tc_hl_fl *ipv6_tc_hl_fl;
+       struct nfp_fl_set_ip4_ttl_tos *ipv4_ttl_tos;
+       struct nfp_fl_set_ip4_addrs *ipv4_add;
+       struct nfp_fl_set_ipv6_addr *ipv6_add;
+       struct nfp_fl_push_vlan *push_vlan;
+       struct nfp_fl_set_tport *tport;
+       struct nfp_fl_set_eth *eth;
+       struct nfp_fl_act_head *a;
+       unsigned int act_off = 0;
+       u8 act_id = 0;
+       u8 *ports;
+       int i;
+
+       while (act_off < flow->meta.act_len) {
+               a = (struct nfp_fl_act_head *)&flow->action_data[act_off];
+               act_id = a->jump_id;
+
+               switch (act_id) {
+               case NFP_FL_ACTION_OPCODE_OUTPUT:
+                       if (act_out)
+                               (*act_out)++;
+                       break;
+               case NFP_FL_ACTION_OPCODE_PUSH_VLAN:
+                       push_vlan = (struct nfp_fl_push_vlan *)a;
+                       if (push_vlan->vlan_tci)
+                               merge->tci = cpu_to_be16(0xffff);
+                       break;
+               case NFP_FL_ACTION_OPCODE_POP_VLAN:
+                       merge->tci = cpu_to_be16(0);
+                       break;
+               case NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL:
+                       /* New tunnel header means l2 to l4 can be matched. */
+                       eth_broadcast_addr(&merge->l2.mac_dst[0]);
+                       eth_broadcast_addr(&merge->l2.mac_src[0]);
+                       memset(&merge->l4, 0xff,
+                              sizeof(struct nfp_flower_tp_ports));
+                       memset(&merge->ipv4, 0xff,
+                              sizeof(struct nfp_flower_ipv4));
+                       break;
+               case NFP_FL_ACTION_OPCODE_SET_ETHERNET:
+                       eth = (struct nfp_fl_set_eth *)a;
+                       for (i = 0; i < ETH_ALEN; i++)
+                               merge->l2.mac_dst[i] |= eth->eth_addr_mask[i];
+                       for (i = 0; i < ETH_ALEN; i++)
+                               merge->l2.mac_src[i] |=
+                                       eth->eth_addr_mask[ETH_ALEN + i];
+                       break;
+               case NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS:
+                       ipv4_add = (struct nfp_fl_set_ip4_addrs *)a;
+                       merge->ipv4.ipv4_src |= ipv4_add->ipv4_src_mask;
+                       merge->ipv4.ipv4_dst |= ipv4_add->ipv4_dst_mask;
+                       break;
+               case NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS:
+                       ipv4_ttl_tos = (struct nfp_fl_set_ip4_ttl_tos *)a;
+                       merge->ipv4.ip_ext.ttl |= ipv4_ttl_tos->ipv4_ttl_mask;
+                       merge->ipv4.ip_ext.tos |= ipv4_ttl_tos->ipv4_tos_mask;
+                       break;
+               case NFP_FL_ACTION_OPCODE_SET_IPV6_SRC:
+                       ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
+                       for (i = 0; i < 4; i++)
+                               merge->ipv6.ipv6_src.in6_u.u6_addr32[i] |=
+                                       ipv6_add->ipv6[i].mask;
+                       break;
+               case NFP_FL_ACTION_OPCODE_SET_IPV6_DST:
+                       ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
+                       for (i = 0; i < 4; i++)
+                               merge->ipv6.ipv6_dst.in6_u.u6_addr32[i] |=
+                                       ipv6_add->ipv6[i].mask;
+                       break;
+               case NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL:
+                       ipv6_tc_hl_fl = (struct nfp_fl_set_ipv6_tc_hl_fl *)a;
+                       merge->ipv6.ip_ext.ttl |=
+                               ipv6_tc_hl_fl->ipv6_hop_limit_mask;
+                       merge->ipv6.ip_ext.tos |= ipv6_tc_hl_fl->ipv6_tc_mask;
+                       merge->ipv6.ipv6_flow_label_exthdr |=
+                               ipv6_tc_hl_fl->ipv6_label_mask;
+                       break;
+               case NFP_FL_ACTION_OPCODE_SET_UDP:
+               case NFP_FL_ACTION_OPCODE_SET_TCP:
+                       tport = (struct nfp_fl_set_tport *)a;
+                       ports = (u8 *)&merge->l4.port_src;
+                       for (i = 0; i < 4; i++)
+                               ports[i] |= tport->tp_port_mask[i];
+                       break;
+               case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
+               case NFP_FL_ACTION_OPCODE_PRE_LAG:
+               case NFP_FL_ACTION_OPCODE_PUSH_GENEVE:
+                       break;
+               default:
+                       return -EOPNOTSUPP;
+               }
+
+               act_off += a->len_lw << NFP_FL_LW_SIZ;
+       }
+
+       if (last_act_id)
+               *last_act_id = act_id;
+
+       return 0;
+}
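
All of the OR-ing above encodes one idea: header bits rewritten by a sub_flow1 action have a known value after recirculation, so they count as matched. A one-word userspace model:

#include <stdio.h>

int main(void)
{
	unsigned int matched   = 0x00ff;	/* bits sub_flow1 matches on */
	unsigned int rewritten = 0x0f00;	/* mask of a set action      */

	matched |= rewritten;	/* as in update_merge_with_actions */
	printf("effective match mask: 0x%04x\n", matched);
	return 0;
}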
+
+static int
+nfp_flower_populate_merge_match(struct nfp_fl_payload *flow,
+                               struct nfp_flower_merge_check *merge,
+                               bool extra_fields)
+{
+       struct nfp_flower_meta_tci *meta_tci;
+       u8 *mask = flow->mask_data;
+       u8 key_layer, match_size;
+
+       memset(merge, 0, sizeof(struct nfp_flower_merge_check));
+
+       meta_tci = (struct nfp_flower_meta_tci *)mask;
+       key_layer = meta_tci->nfp_flow_key_layer;
+
+       if (key_layer & ~NFP_FLOWER_MERGE_FIELDS && !extra_fields)
+               return -EOPNOTSUPP;
+
+       merge->tci = meta_tci->tci;
+       mask += sizeof(struct nfp_flower_meta_tci);
+
+       if (key_layer & NFP_FLOWER_LAYER_EXT_META)
+               mask += sizeof(struct nfp_flower_ext_meta);
+
+       mask += sizeof(struct nfp_flower_in_port);
+
+       if (key_layer & NFP_FLOWER_LAYER_MAC) {
+               match_size = sizeof(struct nfp_flower_mac_mpls);
+               memcpy(&merge->l2, mask, match_size);
+               mask += match_size;
+       }
+
+       if (key_layer & NFP_FLOWER_LAYER_TP) {
+               match_size = sizeof(struct nfp_flower_tp_ports);
+               memcpy(&merge->l4, mask, match_size);
+               mask += match_size;
+       }
+
+       if (key_layer & NFP_FLOWER_LAYER_IPV4) {
+               match_size = sizeof(struct nfp_flower_ipv4);
+               memcpy(&merge->ipv4, mask, match_size);
+       }
+
+       if (key_layer & NFP_FLOWER_LAYER_IPV6) {
+               match_size = sizeof(struct nfp_flower_ipv6);
+               memcpy(&merge->ipv6, mask, match_size);
+       }
+
+       return 0;
+}
+
+static int
+nfp_flower_can_merge(struct nfp_fl_payload *sub_flow1,
+                    struct nfp_fl_payload *sub_flow2)
+{
+       /* Two flows can be merged if sub_flow2 only matches on bits that are
+        * either matched by sub_flow1 or set by a sub_flow1 action. This
+        * ensures that every packet that hits sub_flow1 and recirculates is
+        * guaranteed to hit sub_flow2.
+        */
+       struct nfp_flower_merge_check sub_flow1_merge, sub_flow2_merge;
+       int err, act_out = 0;
+       u8 last_act_id = 0;
+
+       err = nfp_flower_populate_merge_match(sub_flow1, &sub_flow1_merge,
+                                             true);
+       if (err)
+               return err;
+
+       err = nfp_flower_populate_merge_match(sub_flow2, &sub_flow2_merge,
+                                             false);
+       if (err)
+               return err;
+
+       err = nfp_flower_update_merge_with_actions(sub_flow1, &sub_flow1_merge,
+                                                  &last_act_id, &act_out);
+       if (err)
+               return err;
+
+       /* Must only be 1 output action and it must be the last in sequence. */
+       if (act_out != 1 || last_act_id != NFP_FL_ACTION_OPCODE_OUTPUT)
+               return -EOPNOTSUPP;
+
+       /* Reject merge if sub_flow2 matches on something that is not matched
+        * on or set in an action by sub_flow1.
+        */
+       err = bitmap_andnot(sub_flow2_merge.vals, sub_flow2_merge.vals,
+                           sub_flow1_merge.vals,
+                           sizeof(struct nfp_flower_merge_check) * 8);
+       if (err)
+               return -EINVAL;
+
+       return 0;
+}
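
The bitmap_andnot() call is a subset test. A runnable userspace model of the rule in the comment, reduced to a single word:

#include <stdbool.h>
#include <stdio.h>

static bool can_merge(unsigned long flow1_bits, unsigned long flow2_bits)
{
	/* reject if flow2 matches bits flow1 neither matches nor sets */
	return (flow2_bits & ~flow1_bits) == 0;
}

int main(void)
{
	printf("%d\n", can_merge(0xff00, 0x0f00));	/* 1: subset, merge ok */
	printf("%d\n", can_merge(0xff00, 0x00f0));	/* 0: extra match bits */
	return 0;
}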
+
+static unsigned int
+nfp_flower_copy_pre_actions(char *act_dst, char *act_src, int len,
+                           bool *tunnel_act)
+{
+       unsigned int act_off = 0, act_len;
+       struct nfp_fl_act_head *a;
+       u8 act_id = 0;
+
+       while (act_off < len) {
+               a = (struct nfp_fl_act_head *)&act_src[act_off];
+               act_len = a->len_lw << NFP_FL_LW_SIZ;
+               act_id = a->jump_id;
+
+               switch (act_id) {
+               case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
+                       if (tunnel_act)
+                               *tunnel_act = true;
+                       /* fall through */
+               case NFP_FL_ACTION_OPCODE_PRE_LAG:
+                       memcpy(act_dst + act_off, act_src + act_off, act_len);
+                       break;
+               default:
+                       return act_off;
+               }
+
+               act_off += act_len;
+       }
+
+       return act_off;
+}
+
+static int nfp_fl_verify_post_tun_acts(char *acts, int len)
+{
+       struct nfp_fl_act_head *a;
+       unsigned int act_off = 0;
+
+       while (act_off < len) {
+               a = (struct nfp_fl_act_head *)&acts[act_off];
+               if (a->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT)
+                       return -EOPNOTSUPP;
+
+               act_off += a->len_lw << NFP_FL_LW_SIZ;
+       }
+
+       return 0;
+}
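
Both walkers above rely on the same framing: every action opens with a head whose len_lw gives its length in 4-byte words (scaled by NFP_FL_LW_SIZ, assumed to be 2 here). A standalone model with made-up opcodes:

#include <stdint.h>
#include <stdio.h>

#define LW_SIZ	2			/* bytes = len_lw << LW_SIZ */

struct act_head { uint8_t jump_id; uint8_t len_lw; };

int main(void)
{
	/* two fake actions: an 8-byte one followed by a 12-byte one */
	uint8_t acts[20] = { 1, 2, 0, 0, 0, 0, 0, 0,
			     7, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	unsigned int off = 0;

	while (off < sizeof(acts)) {
		struct act_head *a = (struct act_head *)&acts[off];

		printf("opcode %u, %u bytes\n", a->jump_id,
		       a->len_lw << LW_SIZ);
		off += a->len_lw << LW_SIZ;
	}
	return 0;
}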
+
+static int
+nfp_flower_merge_action(struct nfp_fl_payload *sub_flow1,
+                       struct nfp_fl_payload *sub_flow2,
+                       struct nfp_fl_payload *merge_flow)
+{
+       unsigned int sub1_act_len, sub2_act_len, pre_off1, pre_off2;
+       bool tunnel_act = false;
+       char *merge_act;
+       int err;
+
+       /* The last action of sub_flow1 must be output - do not merge this. */
+       sub1_act_len = sub_flow1->meta.act_len - sizeof(struct nfp_fl_output);
+       sub2_act_len = sub_flow2->meta.act_len;
+
+       if (!sub2_act_len)
+               return -EINVAL;
+
+       if (sub1_act_len + sub2_act_len > NFP_FL_MAX_A_SIZ)
+               return -EINVAL;
+
+       /* A shortcut can only be applied if there is a single action. */
+       if (sub1_act_len)
+               merge_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
+       else
+               merge_flow->meta.shortcut = sub_flow2->meta.shortcut;
+
+       merge_flow->meta.act_len = sub1_act_len + sub2_act_len;
+       merge_act = merge_flow->action_data;
+
+       /* Copy any pre-actions to the start of merge flow action list. */
+       pre_off1 = nfp_flower_copy_pre_actions(merge_act,
+                                              sub_flow1->action_data,
+                                              sub1_act_len, &tunnel_act);
+       merge_act += pre_off1;
+       sub1_act_len -= pre_off1;
+       pre_off2 = nfp_flower_copy_pre_actions(merge_act,
+                                              sub_flow2->action_data,
+                                              sub2_act_len, NULL);
+       merge_act += pre_off2;
+       sub2_act_len -= pre_off2;
+
+       /* FW does a tunnel push when egressing, so if sub_flow 1 pushes
+        * a tunnel, sub_flow 2 can only have output actions for a valid merge.
+        */
+       if (tunnel_act) {
+               char *post_tun_acts = &sub_flow2->action_data[pre_off2];
+
+               err = nfp_fl_verify_post_tun_acts(post_tun_acts, sub2_act_len);
+               if (err)
+                       return err;
+       }
+
+       /* Copy remaining actions from sub_flows 1 and 2. */
+       memcpy(merge_act, sub_flow1->action_data + pre_off1, sub1_act_len);
+       merge_act += sub1_act_len;
+       memcpy(merge_act, sub_flow2->action_data + pre_off2, sub2_act_len);
+
+       return 0;
+}
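
The merged action list therefore has a fixed shape: pre-actions from both flows first, then sub_flow1's actions minus its final output, then all of sub_flow2's actions. A toy model of the concatenation order (symbolic action names only):

#include <stdio.h>

int main(void)
{
	const char *pre  = "[pre_tun]";			/* pre-actions lead */
	const char *sub1 = "[set_eth]";			/* output dropped   */
	const char *sub2 = "[push_vlan][output]";	/* copied verbatim  */

	printf("%s%s%s\n", pre, sub1, sub2);
	return 0;	/* [pre_tun][set_eth][push_vlan][output] */
}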
+
+/* Flow link code should only be accessed under RTNL. */
+static void nfp_flower_unlink_flow(struct nfp_fl_payload_link *link)
+{
+       list_del(&link->merge_flow.list);
+       list_del(&link->sub_flow.list);
+       kfree(link);
+}
+
+static void nfp_flower_unlink_flows(struct nfp_fl_payload *merge_flow,
+                                   struct nfp_fl_payload *sub_flow)
+{
+       struct nfp_fl_payload_link *link;
+
+       list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list)
+               if (link->sub_flow.flow == sub_flow) {
+                       nfp_flower_unlink_flow(link);
+                       return;
+               }
+}
+
+static int nfp_flower_link_flows(struct nfp_fl_payload *merge_flow,
+                                struct nfp_fl_payload *sub_flow)
+{
+       struct nfp_fl_payload_link *link;
+
+       link = kmalloc(sizeof(*link), GFP_KERNEL);
+       if (!link)
+               return -ENOMEM;
+
+       link->merge_flow.flow = merge_flow;
+       list_add_tail(&link->merge_flow.list, &merge_flow->linked_flows);
+       link->sub_flow.flow = sub_flow;
+       list_add_tail(&link->sub_flow.list, &sub_flow->linked_flows);
+
+       return 0;
+}
+
+/**
+ * nfp_flower_merge_offloaded_flows() - Merge 2 existing flows into a single flow.
+ * @app:       Pointer to the APP handle
+ * @sub_flow1: Initial flow matched to produce merge hint
+ * @sub_flow2: Post recirculation flow matched in merge hint
+ *
+ * Combines 2 flows (if valid) into a single flow, removing the initial flow
+ * from hw and offloading the new, merged flow.
+ *
+ * Return: negative value on error, 0 on success.
+ */
+int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
+                                    struct nfp_fl_payload *sub_flow1,
+                                    struct nfp_fl_payload *sub_flow2)
+{
+       struct tc_cls_flower_offload merge_tc_off;
+       struct nfp_flower_priv *priv = app->priv;
+       struct nfp_fl_payload *merge_flow;
+       struct nfp_fl_key_ls merge_key_ls;
+       int err;
+
+       ASSERT_RTNL();
+
+       if (sub_flow1 == sub_flow2 ||
+           nfp_flower_is_merge_flow(sub_flow1) ||
+           nfp_flower_is_merge_flow(sub_flow2))
+               return -EINVAL;
+
+       err = nfp_flower_can_merge(sub_flow1, sub_flow2);
+       if (err)
+               return err;
+
+       merge_key_ls.key_size = sub_flow1->meta.key_len;
+
+       merge_flow = nfp_flower_allocate_new(&merge_key_ls);
+       if (!merge_flow)
+               return -ENOMEM;
+
+       merge_flow->tc_flower_cookie = (unsigned long)merge_flow;
+       merge_flow->ingress_dev = sub_flow1->ingress_dev;
+
+       memcpy(merge_flow->unmasked_data, sub_flow1->unmasked_data,
+              sub_flow1->meta.key_len);
+       memcpy(merge_flow->mask_data, sub_flow1->mask_data,
+              sub_flow1->meta.mask_len);
+
+       err = nfp_flower_merge_action(sub_flow1, sub_flow2, merge_flow);
+       if (err)
+               goto err_destroy_merge_flow;
+
+       err = nfp_flower_link_flows(merge_flow, sub_flow1);
+       if (err)
+               goto err_destroy_merge_flow;
+
+       err = nfp_flower_link_flows(merge_flow, sub_flow2);
+       if (err)
+               goto err_unlink_sub_flow1;
+
+       merge_tc_off.cookie = merge_flow->tc_flower_cookie;
+       err = nfp_compile_flow_metadata(app, &merge_tc_off, merge_flow,
+                                       merge_flow->ingress_dev);
+       if (err)
+               goto err_unlink_sub_flow2;
+
+       err = rhashtable_insert_fast(&priv->flow_table, &merge_flow->fl_node,
+                                    nfp_flower_table_params);
+       if (err)
+               goto err_release_metadata;
+
+       err = nfp_flower_xmit_flow(app, merge_flow,
+                                  NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
+       if (err)
+               goto err_remove_rhash;
+
+       merge_flow->in_hw = true;
+       sub_flow1->in_hw = false;
+
+       return 0;
+
+err_remove_rhash:
+       WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
+                                           &merge_flow->fl_node,
+                                           nfp_flower_table_params));
+err_release_metadata:
+       nfp_modify_flow_metadata(app, merge_flow);
+err_unlink_sub_flow2:
+       nfp_flower_unlink_flows(merge_flow, sub_flow2);
+err_unlink_sub_flow1:
+       nfp_flower_unlink_flows(merge_flow, sub_flow1);
+err_destroy_merge_flow:
+       kfree(merge_flow->action_data);
+       kfree(merge_flow->mask_data);
+       kfree(merge_flow->unmasked_data);
+       kfree(merge_flow);
+       return err;
+}
+
 /**
  * nfp_flower_add_offload() - Adds a new flow to hardware.
  * @app:       Pointer to the APP handle
@@ -454,6 +919,8 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
        if (port)
                port->tc_offload_cnt++;
 
+       flow_pay->in_hw = true;
+
        /* Deallocate flow payload when flower rule has been destroyed. */
        kfree(key_layer);
 
@@ -475,6 +942,75 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
        return err;
 }
 
+static void
+nfp_flower_remove_merge_flow(struct nfp_app *app,
+                            struct nfp_fl_payload *del_sub_flow,
+                            struct nfp_fl_payload *merge_flow)
+{
+       struct nfp_flower_priv *priv = app->priv;
+       struct nfp_fl_payload_link *link, *temp;
+       struct nfp_fl_payload *origin;
+       bool mod = false;
+       int err;
+
+       link = list_first_entry(&merge_flow->linked_flows,
+                               struct nfp_fl_payload_link, merge_flow.list);
+       origin = link->sub_flow.flow;
+
+       /* Re-add the rule the merge had overwritten if it has not been deleted. */
+       if (origin != del_sub_flow)
+               mod = true;
+
+       err = nfp_modify_flow_metadata(app, merge_flow);
+       if (err) {
+               nfp_flower_cmsg_warn(app, "Metadata update failed for merge flow delete.\n");
+               goto err_free_links;
+       }
+
+       if (!mod) {
+               err = nfp_flower_xmit_flow(app, merge_flow,
+                                          NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
+               if (err) {
+                       nfp_flower_cmsg_warn(app, "Failed to delete merged flow.\n");
+                       goto err_free_links;
+               }
+       } else {
+               __nfp_modify_flow_metadata(priv, origin);
+               err = nfp_flower_xmit_flow(app, origin,
+                                          NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
+               if (err)
+                       nfp_flower_cmsg_warn(app, "Failed to revert merge flow.\n");
+               origin->in_hw = true;
+       }
+
+err_free_links:
+       /* Clean any links connected with the merged flow. */
+       list_for_each_entry_safe(link, temp, &merge_flow->linked_flows,
+                                merge_flow.list)
+               nfp_flower_unlink_flow(link);
+
+       kfree(merge_flow->action_data);
+       kfree(merge_flow->mask_data);
+       kfree(merge_flow->unmasked_data);
+       WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
+                                           &merge_flow->fl_node,
+                                           nfp_flower_table_params));
+       kfree_rcu(merge_flow, rcu);
+}
+
+static void
+nfp_flower_del_linked_merge_flows(struct nfp_app *app,
+                                 struct nfp_fl_payload *sub_flow)
+{
+       struct nfp_fl_payload_link *link, *temp;
+
+       /* Remove any merge flow formed from the deleted sub_flow. */
+       list_for_each_entry_safe(link, temp, &sub_flow->linked_flows,
+                                sub_flow.list)
+               nfp_flower_remove_merge_flow(app, sub_flow,
+                                            link->merge_flow.flow);
+}
+
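+/* Linkage model (a sketch consistent with the accessors used above): each
+ * nfp_fl_payload_link ties one merge flow to one sub-flow and sits on both
+ * flows' linked_flows lists, roughly:
+ *
+ *        struct nfp_fl_payload_link {
+ *                struct {
+ *                        struct list_head list;
+ *                        struct nfp_fl_payload *flow;
+ *                } merge_flow, sub_flow;
+ *        };
+ */
+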
 /**
  * nfp_flower_del_offload() - Removes a flow from hardware.
  * @app:       Pointer to the APP handle
@@ -482,7 +1018,7 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
  * @flow:      TC flower classifier offload structure
  *
  * Removes a flow from the repeated hash structure and clears the
- * action payload.
+ * action payload. Any flows merged from this are also deleted.
  *
  * Return: negative value on error, 0 if removed successfully.
  */
@@ -504,17 +1040,22 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
 
        err = nfp_modify_flow_metadata(app, nfp_flow);
        if (err)
-               goto err_free_flow;
+               goto err_free_merge_flow;
 
        if (nfp_flow->nfp_tun_ipv4_addr)
                nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);
 
+       if (!nfp_flow->in_hw) {
+               err = 0;
+               goto err_free_merge_flow;
+       }
+
        err = nfp_flower_xmit_flow(app, nfp_flow,
                                   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
-       if (err)
-               goto err_free_flow;
+       /* Fall through on error. */
 
-err_free_flow:
+err_free_merge_flow:
+       nfp_flower_del_linked_merge_flows(app, nfp_flow);
        if (port)
                port->tc_offload_cnt--;
        kfree(nfp_flow->action_data);
@@ -527,6 +1068,52 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
        return err;
 }
 
+static void
+__nfp_flower_update_merge_stats(struct nfp_app *app,
+                               struct nfp_fl_payload *merge_flow)
+{
+       struct nfp_flower_priv *priv = app->priv;
+       struct nfp_fl_payload_link *link;
+       struct nfp_fl_payload *sub_flow;
+       u64 pkts, bytes, used;
+       u32 ctx_id;
+
+       ctx_id = be32_to_cpu(merge_flow->meta.host_ctx_id);
+       pkts = priv->stats[ctx_id].pkts;
+       /* Do not cycle subflows if no stats to distribute. */
+       if (!pkts)
+               return;
+       bytes = priv->stats[ctx_id].bytes;
+       used = priv->stats[ctx_id].used;
+
+       /* Reset stats for the merge flow. */
+       priv->stats[ctx_id].pkts = 0;
+       priv->stats[ctx_id].bytes = 0;
+
+       /* The merge flow has received stats updates from firmware.
+        * Distribute these stats to all subflows that form the merge.
+        * The stats will then be collected by TC via the subflows.
+        */
+       list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list) {
+               sub_flow = link->sub_flow.flow;
+               ctx_id = be32_to_cpu(sub_flow->meta.host_ctx_id);
+               priv->stats[ctx_id].pkts += pkts;
+               priv->stats[ctx_id].bytes += bytes;
+               priv->stats[ctx_id].used = max_t(u64, priv->stats[ctx_id].used,
+                                                used);
+       }
+}
+
+static void
+nfp_flower_update_merge_stats(struct nfp_app *app,
+                             struct nfp_fl_payload *sub_flow)
+{
+       struct nfp_fl_payload_link *link;
+
+       /* Distribute stats from any merge flow this subflow is part of. */
+       list_for_each_entry(link, &sub_flow->linked_flows, sub_flow.list)
+               __nfp_flower_update_merge_stats(app, link->merge_flow.flow);
+}
+
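+/* Example: if the merge flow's context reports pkts=10 and bytes=1500,
+ * each sub-flow is credited +10 pkts / +1500 bytes and the merge counters
+ * are reset, so TC (which only knows the sub-flows) sees the traffic
+ * against every rule that contributed to the merge.
+ */
+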
 /**
  * nfp_flower_get_stats() - Populates flow stats obtained from hardware.
  * @app:       Pointer to the APP handle
@@ -553,6 +1140,10 @@ nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
        ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
 
        spin_lock_bh(&priv->stats_lock);
+       /* If request is for a sub_flow, update stats from merged flows. */
+       if (!list_empty(&nfp_flow->linked_flows))
+               nfp_flower_update_merge_stats(app, nfp_flow);
+
        flow_stats_update(&flow->stats, priv->stats[ctx_id].bytes,
                          priv->stats[ctx_id].pkts, priv->stats[ctx_id].used);
 
@@ -682,7 +1273,9 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
        struct nfp_flower_priv *priv = app->priv;
        int err;
 
-       if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+       if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
+           !(f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
+             nfp_flower_internal_port_can_offload(app, netdev)))
                return -EOPNOTSUPP;
 
        switch (f->command) {
index 4d78be4ec4e99b85a77f983da8b20041852ad11a..faa06edf95acc106a7c30c06de13d71e6267a584 100644 (file)
@@ -171,7 +171,7 @@ void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
        for (i = 0; i < count; i++) {
                ipv4_addr = payload->tun_info[i].ipv4;
                port = be32_to_cpu(payload->tun_info[i].egress_port);
-               netdev = nfp_app_repr_get(app, port);
+               netdev = nfp_app_dev_get(app, port, NULL);
                if (!netdev)
                        continue;
 
@@ -270,9 +270,10 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
                    struct flowi4 *flow, struct neighbour *neigh, gfp_t flag)
 {
        struct nfp_tun_neigh payload;
+       u32 port_id;
 
-       /* Only offload representor IPv4s for now. */
-       if (!nfp_netdev_is_nfp_repr(netdev))
+       port_id = nfp_flower_get_port_id_from_netdev(app, netdev);
+       if (!port_id)
                return;
 
        memset(&payload, 0, sizeof(struct nfp_tun_neigh));
@@ -290,7 +291,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
        payload.src_ipv4 = flow->saddr;
        ether_addr_copy(payload.src_addr, netdev->dev_addr);
        neigh_ha_snapshot(payload.dst_addr, neigh, netdev);
-       payload.port_id = cpu_to_be32(nfp_repr_get_port_id(netdev));
+       payload.port_id = cpu_to_be32(port_id);
        /* Add destination of new route to NFP cache. */
        nfp_tun_add_route_to_cache(app, payload.dst_ipv4);
 
@@ -366,7 +367,7 @@ void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb)
 
        payload = nfp_flower_cmsg_get_data(skb);
 
-       netdev = nfp_app_repr_get(app, be32_to_cpu(payload->ingress_port));
+       netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL);
        if (!netdev)
                goto route_fail_warning;
 
index a6fda07fce43301cd3803ea350f6b292a2cd7232..76d13af46a7a313d75d6e2b15ae1dbe57bf8b171 100644 (file)
@@ -79,7 +79,7 @@ extern const struct nfp_app_type app_abm;
  * @eswitch_mode_set:    set SR-IOV eswitch mode (under pf->lock)
  * @sriov_enable: app-specific sriov initialisation
  * @sriov_disable: app-specific sriov clean-up
- * @repr_get:  get representor netdev
+ * @dev_get:   get representor or internal port representing netdev
  */
 struct nfp_app_type {
        enum nfp_app_id id;
@@ -143,7 +143,8 @@ struct nfp_app_type {
 
        enum devlink_eswitch_mode (*eswitch_mode_get)(struct nfp_app *app);
        int (*eswitch_mode_set)(struct nfp_app *app, u16 mode);
-       struct net_device *(*repr_get)(struct nfp_app *app, u32 id);
+       struct net_device *(*dev_get)(struct nfp_app *app, u32 id,
+                                     bool *redir_egress);
 };
 
 /**
@@ -397,12 +398,14 @@ static inline void nfp_app_sriov_disable(struct nfp_app *app)
                app->type->sriov_disable(app);
 }
 
-static inline struct net_device *nfp_app_repr_get(struct nfp_app *app, u32 id)
+static inline
+struct net_device *nfp_app_dev_get(struct nfp_app *app, u32 id,
+                                  bool *redir_egress)
 {
-       if (unlikely(!app || !app->type->repr_get))
+       if (unlikely(!app || !app->type->dev_get))
                return NULL;
 
-       return app->type->repr_get(app, id);
+       return app->type->dev_get(app, id, redir_egress);
 }
 
 struct nfp_app *nfp_app_from_netdev(struct net_device *netdev);
index 919da0d84fb4cd7ee30e85cfdb8fa0fcd3a46ff0..8e7591241e7ca587014842ad972e11286be862a2 100644 (file)
@@ -354,6 +354,8 @@ int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port)
 {
        struct nfp_eth_table_port eth_port;
        struct devlink *devlink;
+       const u8 *serial;
+       int serial_len;
        int ret;
 
        rtnl_lock();
@@ -362,9 +364,10 @@ int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port)
        if (ret)
                return ret;
 
+       serial_len = nfp_cpp_serial(port->app->cpp, &serial);
        devlink_port_attrs_set(&port->dl_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
                               eth_port.label_port, eth_port.is_split,
-                              eth_port.label_subport);
+                              eth_port.label_subport, serial, serial_len);
 
        devlink = priv_to_devlink(app->pf);
 
index be37c2d6151c43f705515314e555f6fc643ccd91..df9aff2684ed0a57ed9c70451b384e270433ce67 100644 (file)
@@ -539,12 +539,17 @@ struct nfp_net_dp {
  * @shared_handler:     Handler for shared interrupts
  * @shared_name:        Name for shared interrupt
  * @me_freq_mhz:        ME clock_freq (MHz)
- * @reconfig_lock:     Protects HW reconfiguration request regs/machinery
+ * @reconfig_lock:     Protects @reconfig_posted, @reconfig_timer_active,
+ *                     @reconfig_sync_present and HW reconfiguration request
+ *                     regs/machinery from async requests (sync must take
+ *                     @bar_lock)
  * @reconfig_posted:   Pending reconfig bits coming from async sources
  * @reconfig_timer_active:  Timer for reading reconfiguration results is pending
  * @reconfig_sync_present:  Some thread is performing synchronous reconfig
  * @reconfig_timer:    Timer for async reading of reconfig results
  * @reconfig_in_progress_update:       Update the FW is processing now (debug only)
+ * @bar_lock:          vNIC config BAR access lock, protects: update,
+ *                     mailbox area
  * @link_up:            Is the link up?
  * @link_status_lock:  Protects @link_* and ensures atomicity with BAR reading
  * @rx_coalesce_usecs:      RX interrupt moderation usecs delay parameter
@@ -615,6 +620,8 @@ struct nfp_net {
        struct timer_list reconfig_timer;
        u32 reconfig_in_progress_update;
 
+       struct mutex bar_lock;
+
        u32 rx_coalesce_usecs;
        u32 rx_coalesce_max_frames;
        u32 tx_coalesce_usecs;
@@ -839,6 +846,16 @@ static inline void nfp_ctrl_unlock(struct nfp_net *nn)
        spin_unlock_bh(&nn->r_vecs[0].lock);
 }
 
+static inline void nn_ctrl_bar_lock(struct nfp_net *nn)
+{
+       mutex_lock(&nn->bar_lock);
+}
+
+static inline void nn_ctrl_bar_unlock(struct nfp_net *nn)
+{
+       mutex_unlock(&nn->bar_lock);
+}
+
 /* Globals */
 extern const char nfp_driver_version[];
 
@@ -871,7 +888,9 @@ unsigned int nfp_net_rss_key_sz(struct nfp_net *nn);
 void nfp_net_rss_write_itbl(struct nfp_net *nn);
 void nfp_net_rss_write_key(struct nfp_net *nn);
 void nfp_net_coalesce_write_cfg(struct nfp_net *nn);
-int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd);
+int nfp_net_mbox_lock(struct nfp_net *nn, unsigned int data_size);
+int nfp_net_mbox_reconfig(struct nfp_net *nn, u32 mbox_cmd);
+int nfp_net_mbox_reconfig_and_unlock(struct nfp_net *nn, u32 mbox_cmd);
 
 unsigned int
 nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries,
index 99200b5dac76d50f5669243da2949d4f45b5c155..58657fe504d7886fe3c7cdea849eddb3fcb874b4 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/interrupt.h>
 #include <linux/ip.h>
 #include <linux/ipv6.h>
+#include <linux/lockdep.h>
 #include <linux/mm.h>
 #include <linux/overflow.h>
 #include <linux/page_ref.h>
@@ -137,20 +138,37 @@ static bool nfp_net_reconfig_check_done(struct nfp_net *nn, bool last_check)
        return false;
 }
 
-static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
+static bool __nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
 {
        bool timed_out = false;
+       int i;
+
+       /* Poll update field, waiting for NFP to ack the config.
+        * Busy-wait opportunistically first (50 x 4 us), then sleep.
+        */
+       for (i = 0; i < 50; i++) {
+               if (nfp_net_reconfig_check_done(nn, false))
+                       return false;
+               udelay(4);
+       }
 
-       /* Poll update field, waiting for NFP to ack the config */
        while (!nfp_net_reconfig_check_done(nn, timed_out)) {
-               msleep(1);
+               usleep_range(250, 500);
                timed_out = time_is_before_eq_jiffies(deadline);
        }
 
+       return timed_out;
+}
+
+static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
+{
+       if (__nfp_net_reconfig_wait(nn, deadline))
+               return -EIO;
+
        if (nn_readl(nn, NFP_NET_CFG_UPDATE) & NFP_NET_CFG_UPDATE_ERR)
                return -EIO;
 
-       return timed_out ? -EIO : 0;
+       return 0;
 }
 
 static void nfp_net_reconfig_timer(struct timer_list *t)
@@ -243,7 +261,7 @@ static void nfp_net_reconfig_wait_posted(struct nfp_net *nn)
 }
 
 /**
- * nfp_net_reconfig() - Reconfigure the firmware
+ * __nfp_net_reconfig() - Reconfigure the firmware
  * @nn:      NFP Net device to reconfigure
  * @update:  The value for the update field in the BAR config
  *
@@ -253,10 +271,12 @@ static void nfp_net_reconfig_wait_posted(struct nfp_net *nn)
  *
  * Return: Negative errno on error, 0 on success
  */
-int nfp_net_reconfig(struct nfp_net *nn, u32 update)
+static int __nfp_net_reconfig(struct nfp_net *nn, u32 update)
 {
        int ret;
 
+       lockdep_assert_held(&nn->bar_lock);
+
        nfp_net_reconfig_sync_enter(nn);
 
        nfp_net_reconfig_start(nn, update);
@@ -274,8 +294,31 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update)
        return ret;
 }
 
+int nfp_net_reconfig(struct nfp_net *nn, u32 update)
+{
+       int ret;
+
+       nn_ctrl_bar_lock(nn);
+       ret = __nfp_net_reconfig(nn, update);
+       nn_ctrl_bar_unlock(nn);
+
+       return ret;
+}
+
+int nfp_net_mbox_lock(struct nfp_net *nn, unsigned int data_size)
+{
+       if (nn->tlv_caps.mbox_len < NFP_NET_CFG_MBOX_SIMPLE_VAL + data_size) {
+               nn_err(nn, "mailbox too small for %u bytes of data (%u)\n",
+                      data_size, nn->tlv_caps.mbox_len);
+               return -EIO;
+       }
+
+       nn_ctrl_bar_lock(nn);
+       return 0;
+}
+
 /**
- * nfp_net_reconfig_mbox() - Reconfigure the firmware via the mailbox
+ * nfp_net_mbox_reconfig() - Reconfigure the firmware via the mailbox
  * @nn:        NFP Net device to reconfigure
  * @mbox_cmd:  The value for the mailbox command
  *
@@ -283,19 +326,15 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update)
  *
  * Return: Negative errno on error, 0 on success
  */
-int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd)
+int nfp_net_mbox_reconfig(struct nfp_net *nn, u32 mbox_cmd)
 {
        u32 mbox = nn->tlv_caps.mbox_off;
        int ret;
 
-       if (!nfp_net_has_mbox(&nn->tlv_caps)) {
-               nn_err(nn, "no mailbox present, command: %u\n", mbox_cmd);
-               return -EIO;
-       }
-
+       lockdep_assert_held(&nn->bar_lock);
        nn_writeq(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd);
 
-       ret = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MBOX);
+       ret = __nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MBOX);
        if (ret) {
                nn_err(nn, "Mailbox update error\n");
                return ret;
@@ -304,6 +343,15 @@ int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd)
        return -nn_readl(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET);
 }
 
+int nfp_net_mbox_reconfig_and_unlock(struct nfp_net *nn, u32 mbox_cmd)
+{
+       int ret;
+
+       ret = nfp_net_mbox_reconfig(nn, mbox_cmd);
+       nn_ctrl_bar_unlock(nn);
+       return ret;
+}
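+
+/* Calling convention (sketch; the VLAN filter paths later in this patch
+ * follow it):
+ *
+ *        err = nfp_net_mbox_lock(nn, data_size);
+ *        if (err)
+ *                return err;
+ *        ... write arguments into the mailbox via nn_writew()/nn_writel() ...
+ *        return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
+ */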
+
 /* Interrupt configuration and handling
  */
 
@@ -909,7 +957,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
                nfp_net_tx_ring_stop(nd_q, tx_ring);
 
        tx_ring->wr_ptr_add += nr_frags + 1;
-       if (__netdev_tx_sent_queue(nd_q, txbuf->real_len, skb->xmit_more))
+       if (__netdev_tx_sent_queue(nd_q, txbuf->real_len, netdev_xmit_more()))
                nfp_net_tx_xmit_more_flush(tx_ring);
 
        return NETDEV_TX_OK;
@@ -1635,6 +1683,7 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
                struct nfp_net_rx_buf *rxbuf;
                struct nfp_net_rx_desc *rxd;
                struct nfp_meta_parsed meta;
+               bool redir_egress = false;
                struct net_device *netdev;
                dma_addr_t new_dma_addr;
                u32 meta_len_xdp = 0;
@@ -1770,13 +1819,16 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
                        struct nfp_net *nn;
 
                        nn = netdev_priv(dp->netdev);
-                       netdev = nfp_app_repr_get(nn->app, meta.portid);
+                       netdev = nfp_app_dev_get(nn->app, meta.portid,
+                                                &redir_egress);
                        if (unlikely(!netdev)) {
                                nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf,
                                                NULL);
                                continue;
                        }
-                       nfp_repr_inc_rx_stats(netdev, pkt_len);
+
+                       if (nfp_netdev_is_nfp_repr(netdev))
+                               nfp_repr_inc_rx_stats(netdev, pkt_len);
                }
 
                skb = build_skb(rxbuf->frag, true_bufsz);
@@ -1811,7 +1863,13 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
                if (meta_len_xdp)
                        skb_metadata_set(skb, meta_len_xdp);
 
-               napi_gro_receive(&rx_ring->r_vec->napi, skb);
+               if (likely(!redir_egress)) {
+                       napi_gro_receive(&rx_ring->r_vec->napi, skb);
+               } else {
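+                       /* Packet targets an internal port: restore the
+                        * MAC header and transmit via the representing
+                        * netdev instead of passing the skb up the stack.
+                        */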
+                       skb->dev = netdev;
+                       __skb_push(skb, ETH_HLEN);
+                       dev_queue_xmit(skb);
+               }
        }
 
        if (xdp_prog) {
@@ -3111,7 +3169,9 @@ static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
 static int
 nfp_net_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
 {
+       const u32 cmd = NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD;
        struct nfp_net *nn = netdev_priv(netdev);
+       int err;
 
        /* Priority tagged packets with vlan id 0 are processed by the
         * NFP as untagged packets
@@ -3119,17 +3179,23 @@ nfp_net_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
        if (!vid)
                return 0;
 
+       err = nfp_net_mbox_lock(nn, NFP_NET_CFG_VLAN_FILTER_SZ);
+       if (err)
+               return err;
+
        nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid);
        nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO,
                  ETH_P_8021Q);
 
-       return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD);
+       return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
 }
 
 static int
 nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
 {
+       const u32 cmd = NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL;
        struct nfp_net *nn = netdev_priv(netdev);
+       int err;
 
        /* Priority tagged packets with vlan id 0 are processed by the
         * NFP as untagged packets
@@ -3137,11 +3203,15 @@ nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
        if (!vid)
                return 0;
 
+       err = nfp_net_mbox_lock(nn, NFP_NET_CFG_VLAN_FILTER_SZ);
+       if (err)
+               return err;
+
        nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid);
        nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO,
                  ETH_P_8021Q);
 
-       return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL);
+       return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
 }
 
 static void nfp_net_stat64(struct net_device *netdev,
@@ -3533,7 +3603,6 @@ const struct net_device_ops nfp_net_netdev_ops = {
        .ndo_udp_tunnel_add     = nfp_net_add_vxlan_port,
        .ndo_udp_tunnel_del     = nfp_net_del_vxlan_port,
        .ndo_bpf                = nfp_net_xdp,
-       .ndo_get_port_parent_id = nfp_port_get_port_parent_id,
        .ndo_get_devlink_port   = nfp_devlink_get_devlink_port,
 };
 
@@ -3634,6 +3703,8 @@ nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
        nn->dp.txd_cnt = NFP_NET_TX_DESCS_DEFAULT;
        nn->dp.rxd_cnt = NFP_NET_RX_DESCS_DEFAULT;
 
+       mutex_init(&nn->bar_lock);
+
        spin_lock_init(&nn->reconfig_lock);
        spin_lock_init(&nn->link_status_lock);
 
@@ -3661,6 +3732,9 @@ nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
 void nfp_net_free(struct nfp_net *nn)
 {
        WARN_ON(timer_pending(&nn->reconfig_timer) || nn->reconfig_posted);
+
+       mutex_destroy(&nn->bar_lock);
+
        if (nn->dp.netdev)
                free_netdev(nn->dp.netdev);
        else
@@ -3922,9 +3996,6 @@ int nfp_net_init(struct nfp_net *nn)
                nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
        }
 
-       if (nn->dp.netdev)
-               nfp_net_netdev_init(nn);
-
        /* Stash the re-configuration queue away.  First odd queue in TX Bar */
        nn->qcp_cfg = nn->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
 
@@ -3937,6 +4008,9 @@ int nfp_net_init(struct nfp_net *nn)
        if (err)
                return err;
 
+       if (nn->dp.netdev)
+               nfp_net_netdev_init(nn);
+
        nfp_net_vecs_init(nn);
 
        if (!nn->dp.netdev)
index f5d564bbb55a0c25cac4b92e7bef5563d811092a..25919e3380718ccd10a961a49c731a64ef8d0211 100644 (file)
 #define NFP_NET_CFG_MBOX_SIMPLE_CMD    0x0
 #define NFP_NET_CFG_MBOX_SIMPLE_RET    0x4
 #define NFP_NET_CFG_MBOX_SIMPLE_VAL    0x8
-#define NFP_NET_CFG_MBOX_SIMPLE_LEN    12
 
 #define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD 1
 #define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL 2
@@ -495,10 +494,4 @@ struct nfp_net_tlv_caps {
 
 int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem,
                           struct nfp_net_tlv_caps *caps);
-
-static inline bool nfp_net_has_mbox(struct nfp_net_tlv_caps *caps)
-{
-       return caps->mbox_len >= NFP_NET_CFG_MBOX_SIMPLE_LEN;
-}
-
 #endif /* _NFP_NET_CTRL_H_ */
index 690b62718dbb3110a8cad67fa354336a729223cf..851e31e0ba8e01354707835b326d91aa31414924 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/pci.h>
 #include <linux/ethtool.h>
 #include <linux/firmware.h>
+#include <linux/sfp.h>
 
 #include "nfpcore/nfp.h"
 #include "nfpcore/nfp_nsp.h"
@@ -152,6 +153,8 @@ static const struct nfp_et_stat nfp_mac_et_stats[] = {
 #define NN_RVEC_GATHER_STATS   9
 #define NN_RVEC_PER_Q_STATS    3
 
+#define SFP_SFF_REV_COMPLIANCE 1
+
 static void nfp_net_get_nspinfo(struct nfp_app *app, char *version)
 {
        struct nfp_nsp *nsp;
@@ -1096,6 +1099,130 @@ nfp_app_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
                                            buffer);
 }
 
+static int
+nfp_port_get_module_info(struct net_device *netdev,
+                        struct ethtool_modinfo *modinfo)
+{
+       struct nfp_eth_table_port *eth_port;
+       struct nfp_port *port;
+       unsigned int read_len;
+       struct nfp_nsp *nsp;
+       int err = 0;
+       u8 data;
+
+       port = nfp_port_from_netdev(netdev);
+       eth_port = nfp_port_get_eth_port(port);
+       if (!eth_port)
+               return -EOPNOTSUPP;
+
+       nsp = nfp_nsp_open(port->app->cpp);
+       if (IS_ERR(nsp)) {
+               err = PTR_ERR(nsp);
+               netdev_err(netdev, "Failed to access the NSP: %d\n", err);
+               return err;
+       }
+
+       if (!nfp_nsp_has_read_module_eeprom(nsp)) {
+               netdev_info(netdev, "reading module EEPROM not supported. Please update flash\n");
+               err = -EOPNOTSUPP;
+               goto exit_close_nsp;
+       }
+
+       switch (eth_port->interface) {
+       case NFP_INTERFACE_SFP:
+       case NFP_INTERFACE_SFP28:
+               err = nfp_nsp_read_module_eeprom(nsp, eth_port->eth_index,
+                                                SFP_SFF8472_COMPLIANCE, &data,
+                                                1, &read_len);
+               if (err < 0)
+                       goto exit_close_nsp;
+
+               if (!data) {
+                       modinfo->type = ETH_MODULE_SFF_8079;
+                       modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+               } else {
+                       modinfo->type = ETH_MODULE_SFF_8472;
+                       modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+               }
+               break;
+       case NFP_INTERFACE_QSFP:
+               err = nfp_nsp_read_module_eeprom(nsp, eth_port->eth_index,
+                                                SFP_SFF_REV_COMPLIANCE, &data,
+                                                1, &read_len);
+               if (err < 0)
+                       goto exit_close_nsp;
+
+               if (data < 0x3) {
+                       modinfo->type = ETH_MODULE_SFF_8436;
+                       modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+               } else {
+                       modinfo->type = ETH_MODULE_SFF_8636;
+                       modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+               }
+               break;
+       case NFP_INTERFACE_QSFP28:
+               modinfo->type = ETH_MODULE_SFF_8636;
+               modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+               break;
+       default:
+               netdev_err(netdev, "Unsupported module 0x%x detected\n",
+                          eth_port->interface);
+               err = -EINVAL;
+       }
+
+exit_close_nsp:
+       nfp_nsp_close(nsp);
+       return err;
+}
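+
+/* Module-type selection above keys off the standard ID bytes: for SFP(28)
+ * a zero SFF-8472 compliance byte means only the base page is readable
+ * (SFF-8079 sized map), while for QSFP a revision compliance below 0x3
+ * predates SFF-8636, so the smaller SFF-8436 map is reported.
+ */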
+
+static int
+nfp_port_get_module_eeprom(struct net_device *netdev,
+                          struct ethtool_eeprom *eeprom, u8 *data)
+{
+       struct nfp_eth_table_port *eth_port;
+       struct nfp_port *port;
+       struct nfp_nsp *nsp;
+       int err;
+
+       port = nfp_port_from_netdev(netdev);
+       eth_port = __nfp_port_get_eth_port(port);
+       if (!eth_port)
+               return -EOPNOTSUPP;
+
+       nsp = nfp_nsp_open(port->app->cpp);
+       if (IS_ERR(nsp)) {
+               err = PTR_ERR(nsp);
+               netdev_err(netdev, "Failed to access the NSP: %d\n", err);
+               return err;
+       }
+
+       if (!nfp_nsp_has_read_module_eeprom(nsp)) {
+               netdev_info(netdev, "reading module EEPROM not supported. Please update flash\n");
+               err = -EOPNOTSUPP;
+               goto exit_close_nsp;
+       }
+
+       err = nfp_nsp_read_module_eeprom(nsp, eth_port->eth_index,
+                                        eeprom->offset, data, eeprom->len,
+                                        &eeprom->len);
+       if (err < 0) {
+               if (eeprom->len) {
+                       netdev_warn(netdev,
+                                   "Incomplete read from module EEPROM: %d\n",
+                                   err);
+                       err = 0;
+               } else {
+                       netdev_err(netdev,
+                                  "Reading from module EEPROM failed: %d\n",
+                                  err);
+               }
+       }
+
+exit_close_nsp:
+       nfp_nsp_close(nsp);
+       return err;
+}
+
 static int nfp_net_set_coalesce(struct net_device *netdev,
                                struct ethtool_coalesce *ec)
 {
@@ -1253,6 +1380,8 @@ static const struct ethtool_ops nfp_net_ethtool_ops = {
        .set_dump               = nfp_app_set_dump,
        .get_dump_flag          = nfp_app_get_dump_flag,
        .get_dump_data          = nfp_app_get_dump_data,
+       .get_module_info        = nfp_port_get_module_info,
+       .get_module_eeprom      = nfp_port_get_module_eeprom,
        .get_coalesce           = nfp_net_get_coalesce,
        .set_coalesce           = nfp_net_set_coalesce,
        .get_channels           = nfp_net_get_channels,
@@ -1272,6 +1401,8 @@ const struct ethtool_ops nfp_port_ethtool_ops = {
        .set_dump               = nfp_app_set_dump,
        .get_dump_flag          = nfp_app_get_dump_flag,
        .get_dump_data          = nfp_app_get_dump_data,
+       .get_module_info        = nfp_port_get_module_info,
+       .get_module_eeprom      = nfp_port_get_module_eeprom,
        .get_link_ksettings     = nfp_net_get_link_ksettings,
        .set_link_ksettings     = nfp_net_set_link_ksettings,
        .get_fecparam           = nfp_port_get_fecparam,
index bf621674f5830617a3ac6104f05106d6571d31e9..08e9bfa95f9bc5bdb8ff2afdef78014eb8cd2a36 100644 (file)
@@ -195,7 +195,7 @@ static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev)
        ret = dev_queue_xmit(skb);
        nfp_repr_inc_tx_stats(netdev, len, ret);
 
-       return ret;
+       return NETDEV_TX_OK;
 }
 
 static int nfp_repr_stop(struct net_device *netdev)
@@ -272,7 +272,6 @@ const struct net_device_ops nfp_repr_netdev_ops = {
        .ndo_fix_features       = nfp_repr_fix_features,
        .ndo_set_features       = nfp_port_set_features,
        .ndo_set_mac_address    = eth_mac_addr,
-       .ndo_get_port_parent_id = nfp_port_get_port_parent_id,
        .ndo_get_devlink_port   = nfp_devlink_get_devlink_port,
 };
 
@@ -383,7 +382,7 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
        netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
        netdev->gso_max_segs = NFP_NET_LSO_MAX_SEGS;
 
-       netdev->priv_flags |= IFF_NO_QUEUE;
+       netdev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL;
        netdev->features |= NETIF_F_LLTX;
 
        if (nfp_app_has_tc(app)) {
index 93c5bfc0510b59975c72411e3e5d09b068a6984b..fcd16877e6e091943fe30c269c761b96a7952d66 100644 (file)
@@ -30,22 +30,6 @@ struct nfp_port *nfp_port_from_netdev(struct net_device *netdev)
        return NULL;
 }
 
-int nfp_port_get_port_parent_id(struct net_device *netdev,
-                               struct netdev_phys_item_id *ppid)
-{
-       struct nfp_port *port;
-       const u8 *serial;
-
-       port = nfp_port_from_netdev(netdev);
-       if (!port)
-               return -EOPNOTSUPP;
-
-       ppid->id_len = nfp_cpp_serial(port->app->cpp, &serial);
-       memcpy(&ppid->id, serial, ppid->id_len);
-
-       return 0;
-}
-
 int nfp_port_setup_tc(struct net_device *netdev, enum tc_setup_type type,
                      void *type_data)
 {
index 3a4e224a64b71745284bd1016951438ca916ecf1..42cf4fd875eacf34a4e287180b63a57192a1ea48 100644 (file)
@@ -79,6 +79,8 @@
 #define NFP_VERSIONS_NCSI_OFF  22
 #define NFP_VERSIONS_CFGR_OFF  26
 
+#define NSP_SFF_EEPROM_BLOCK_LEN       8
+
 enum nfp_nsp_cmd {
        SPCODE_NOOP             = 0, /* No operation */
        SPCODE_SOFT_RESET       = 1, /* Soft reset the NFP */
@@ -95,6 +97,7 @@ enum nfp_nsp_cmd {
        SPCODE_FW_STORED        = 16, /* If no FW loaded, load flash app FW */
        SPCODE_HWINFO_LOOKUP    = 17, /* Lookup HWinfo with overwrites etc. */
        SPCODE_VERSIONS         = 21, /* Report FW versions */
+       SPCODE_READ_SFF_EEPROM  = 22, /* Read module EEPROM */
 };
 
 struct nfp_nsp_dma_buf {
@@ -965,3 +968,62 @@ const char *nfp_nsp_versions_get(enum nfp_nsp_versions id, bool flash,
 
        return (const char *)&buf[buf_off];
 }
+
+static int
+__nfp_nsp_module_eeprom(struct nfp_nsp *state, void *buf, unsigned int size)
+{
+       struct nfp_nsp_command_buf_arg module_eeprom = {
+               {
+                       .code           = SPCODE_READ_SFF_EEPROM,
+                       .option         = size,
+               },
+               .in_buf         = buf,
+               .in_size        = size,
+               .out_buf        = buf,
+               .out_size       = size,
+       };
+
+       return nfp_nsp_command_buf(state, &module_eeprom);
+}
+
+int nfp_nsp_read_module_eeprom(struct nfp_nsp *state, int eth_index,
+                              unsigned int offset, void *data,
+                              unsigned int len, unsigned int *read_len)
+{
+       struct eeprom_buf {
+               u8 metalen;
+               __le16 length;
+               __le16 offset;
+               __le16 readlen;
+               u8 eth_index;
+               u8 data[0];
+       } __packed *buf;
+       int bufsz, ret;
+
+       BUILD_BUG_ON(offsetof(struct eeprom_buf, data) % 8);
+
+       /* Buffer must be large enough and rounded to the next block size. */
+       bufsz = struct_size(buf, data, round_up(len, NSP_SFF_EEPROM_BLOCK_LEN));
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       buf->metalen =
+               offsetof(struct eeprom_buf, data) / NSP_SFF_EEPROM_BLOCK_LEN;
+       buf->length = cpu_to_le16(len);
+       buf->offset = cpu_to_le16(offset);
+       buf->eth_index = eth_index;
+
+       ret = __nfp_nsp_module_eeprom(state, buf, bufsz);
+
+       *read_len = min_t(unsigned int, len, le16_to_cpu(buf->readlen));
+       if (*read_len)
+               memcpy(data, buf->data, *read_len);
+
+       if (!ret && *read_len < len)
+               ret = -EIO;
+
+       kfree(buf);
+
+       return ret;
+}
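+
+/* Buffer layout (per the struct and metalen math above): one 8-byte header
+ * block (length, offset, readlen, eth_index) followed by the data area,
+ * rounded up to NSP_SFF_EEPROM_BLOCK_LEN; a short read returns -EIO with
+ * *read_len holding the bytes actually copied.
+ */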
index bd9c358c646fde11ae590d758a8e23b9b43eda52..22ee6985ee1c31408b55e2375e1e4197b70c912e 100644 (file)
@@ -22,6 +22,9 @@ int nfp_nsp_write_flash(struct nfp_nsp *state, const struct firmware *fw);
 int nfp_nsp_mac_reinit(struct nfp_nsp *state);
 int nfp_nsp_load_stored_fw(struct nfp_nsp *state);
 int nfp_nsp_hwinfo_lookup(struct nfp_nsp *state, void *buf, unsigned int size);
+int nfp_nsp_read_module_eeprom(struct nfp_nsp *state, int eth_index,
+                              unsigned int offset, void *data,
+                              unsigned int len, unsigned int *read_len);
 
 static inline bool nfp_nsp_has_mac_reinit(struct nfp_nsp *state)
 {
@@ -43,6 +46,11 @@ static inline bool nfp_nsp_has_versions(struct nfp_nsp *state)
        return nfp_nsp_get_abi_ver_minor(state) > 27;
 }
 
+static inline bool nfp_nsp_has_read_module_eeprom(struct nfp_nsp *state)
+{
+       return nfp_nsp_get_abi_ver_minor(state) > 28;
+}
+
 enum nfp_eth_interface {
        NFP_INTERFACE_NONE      = 0,
        NFP_INTERFACE_SFP       = 1,
index 55d686ed8cdfddbfc4077947a9e561539f060281..5ffaee9f53b18d46d339f7b26ef5f1916333644a 100644 (file)
@@ -1355,7 +1355,7 @@ static void pasemi_mac_queue_csdesc(const struct sk_buff *skb,
        const int nh_off = skb_network_offset(skb);
        const int nh_len = skb_network_header_len(skb);
        const int nfrags = skb_shinfo(skb)->nr_frags;
-       int cs_size, i, fill, hdr, cpyhdr, evt;
+       int cs_size, i, fill, hdr, evt;
        dma_addr_t csdma;
 
        fund = XCT_FUN_ST | XCT_FUN_RR_8BRES |
@@ -1396,7 +1396,6 @@ static void pasemi_mac_queue_csdesc(const struct sk_buff *skb,
                fill++;
 
        /* Copy the result into the TCP packet */
-       cpyhdr = fill;
        CS_DESC(csring, fill++) = XCT_FUN_O | XCT_FUN_FUN(csring->fun) |
                                  XCT_FUN_LLEN(2) | XCT_FUN_SE;
        CS_DESC(csring, fill++) = XCT_PTR_LEN(2) | XCT_PTR_ADDR(cs_dest) | XCT_PTR_T;
index 512186adab003bfa555fb4ecacf6f8d058c17ce6..c5e96ce20f59b2c6f00e32fa876de32da1d6527e 100644 (file)
@@ -431,12 +431,16 @@ struct qed_qm_info {
        u8 num_pf_rls;
 };
 
+#define QED_OVERFLOW_BIT       1
+
 struct qed_db_recovery_info {
        struct list_head list;
 
        /* Lock to protect the doorbell recovery mechanism list */
        spinlock_t lock;
+       bool dorq_attn;
        u32 db_recovery_counter;
+       unsigned long overflow;
 };
 
 struct storm_stats {
@@ -923,8 +927,7 @@ u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc);
 
 /* doorbell recovery mechanism */
 void qed_db_recovery_dp(struct qed_hwfn *p_hwfn);
-void qed_db_recovery_execute(struct qed_hwfn *p_hwfn,
-                            enum qed_db_rec_exec db_exec);
+void qed_db_recovery_execute(struct qed_hwfn *p_hwfn);
 bool qed_edpm_enabled(struct qed_hwfn *p_hwfn);
 
 /* Other Linux specific common definitions */
index 1955737933522f5fd2932fdf24052e4ff48186ab..fccdb06fc5c568f0f2db424f0d7ecb0d7847652f 100644 (file)
@@ -102,11 +102,15 @@ static void qed_db_recovery_dp_entry(struct qed_hwfn *p_hwfn,
 
 /* Doorbell address sanity (address within doorbell bar range) */
 static bool qed_db_rec_sanity(struct qed_dev *cdev,
-                             void __iomem *db_addr, void *db_data)
+                             void __iomem *db_addr,
+                             enum qed_db_rec_width db_width,
+                             void *db_data)
 {
+       u32 width = (db_width == DB_REC_WIDTH_32B) ? 32 : 64;
+
        /* Make sure doorbell address is within the doorbell bar */
        if (db_addr < cdev->doorbells ||
-           (u8 __iomem *)db_addr >
+           (u8 __iomem *)db_addr + width >
            (u8 __iomem *)cdev->doorbells + cdev->db_size) {
                WARN(true,
                     "Illegal doorbell address: %p. Legal range for doorbell addresses is [%p..%p]\n",
@@ -159,7 +163,7 @@ int qed_db_recovery_add(struct qed_dev *cdev,
        }
 
        /* Sanitize doorbell address */
-       if (!qed_db_rec_sanity(cdev, db_addr, db_data))
+       if (!qed_db_rec_sanity(cdev, db_addr, db_width, db_data))
                return -EINVAL;
 
        /* Obtain hwfn from doorbell address */
@@ -205,10 +209,6 @@ int qed_db_recovery_del(struct qed_dev *cdev,
                return 0;
        }
 
-       /* Sanitize doorbell address */
-       if (!qed_db_rec_sanity(cdev, db_addr, db_data))
-               return -EINVAL;
-
        /* Obtain hwfn from doorbell address */
        p_hwfn = qed_db_rec_find_hwfn(cdev, db_addr);
 
@@ -300,31 +300,24 @@ void qed_db_recovery_dp(struct qed_hwfn *p_hwfn)
 
 /* Ring the doorbell of a single doorbell recovery entry */
 static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn,
-                                struct qed_db_recovery_entry *db_entry,
-                                enum qed_db_rec_exec db_exec)
-{
-       if (db_exec != DB_REC_ONCE) {
-               /* Print according to width */
-               if (db_entry->db_width == DB_REC_WIDTH_32B) {
-                       DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
-                                  "%s doorbell address %p data %x\n",
-                                  db_exec == DB_REC_DRY_RUN ?
-                                  "would have rung" : "ringing",
-                                  db_entry->db_addr,
-                                  *(u32 *)db_entry->db_data);
-               } else {
-                       DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
-                                  "%s doorbell address %p data %llx\n",
-                                  db_exec == DB_REC_DRY_RUN ?
-                                  "would have rung" : "ringing",
-                                  db_entry->db_addr,
-                                  *(u64 *)(db_entry->db_data));
-               }
+                                struct qed_db_recovery_entry *db_entry)
+{
+       /* Print according to width */
+       if (db_entry->db_width == DB_REC_WIDTH_32B) {
+               DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+                          "ringing doorbell address %p data %x\n",
+                          db_entry->db_addr,
+                          *(u32 *)db_entry->db_data);
+       } else {
+               DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+                          "ringing doorbell address %p data %llx\n",
+                          db_entry->db_addr,
+                          *(u64 *)(db_entry->db_data));
        }
 
        /* Sanity */
        if (!qed_db_rec_sanity(p_hwfn->cdev, db_entry->db_addr,
-                              db_entry->db_data))
+                              db_entry->db_width, db_entry->db_data))
                return;
 
        /* Flush the write combined buffer. Since there are multiple doorbelling
@@ -334,14 +327,12 @@ static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn,
        wmb();
 
        /* Ring the doorbell */
-       if (db_exec == DB_REC_REAL_DEAL || db_exec == DB_REC_ONCE) {
-               if (db_entry->db_width == DB_REC_WIDTH_32B)
-                       DIRECT_REG_WR(db_entry->db_addr,
-                                     *(u32 *)(db_entry->db_data));
-               else
-                       DIRECT_REG_WR64(db_entry->db_addr,
-                                       *(u64 *)(db_entry->db_data));
-       }
+       if (db_entry->db_width == DB_REC_WIDTH_32B)
+               DIRECT_REG_WR(db_entry->db_addr,
+                             *(u32 *)(db_entry->db_data));
+       else
+               DIRECT_REG_WR64(db_entry->db_addr,
+                               *(u64 *)(db_entry->db_data));
 
        /* Flush the write combined buffer. Next doorbell may come from a
         * different entity to the same address...
@@ -350,29 +341,21 @@ static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn,
 }
 
 /* Traverse the doorbell recovery entry list and ring all the doorbells */
-void qed_db_recovery_execute(struct qed_hwfn *p_hwfn,
-                            enum qed_db_rec_exec db_exec)
+void qed_db_recovery_execute(struct qed_hwfn *p_hwfn)
 {
        struct qed_db_recovery_entry *db_entry = NULL;
 
-       if (db_exec != DB_REC_ONCE) {
-               DP_NOTICE(p_hwfn,
-                         "Executing doorbell recovery. Counter was %d\n",
-                         p_hwfn->db_recovery_info.db_recovery_counter);
+       DP_NOTICE(p_hwfn, "Executing doorbell recovery. Counter was %d\n",
+                 p_hwfn->db_recovery_info.db_recovery_counter);
 
-               /* Track amount of times recovery was executed */
-               p_hwfn->db_recovery_info.db_recovery_counter++;
-       }
+       /* Track amount of times recovery was executed */
+       p_hwfn->db_recovery_info.db_recovery_counter++;
 
        /* Protect the list */
        spin_lock_bh(&p_hwfn->db_recovery_info.lock);
        list_for_each_entry(db_entry,
-                           &p_hwfn->db_recovery_info.list, list_entry) {
-               qed_db_recovery_ring(p_hwfn, db_entry, db_exec);
-               if (db_exec == DB_REC_ONCE)
-                       break;
-       }
-
+                           &p_hwfn->db_recovery_info.list, list_entry)
+               qed_db_recovery_ring(p_hwfn, db_entry);
        spin_unlock_bh(&p_hwfn->db_recovery_info.lock);
 }
 
index e23980e301b6a2be7f015d6a0c6f6aaadbf788b3..8848d5bed6e5c58a188900bf9ad5710529d66b51 100644 (file)
@@ -378,6 +378,9 @@ static int qed_db_rec_flush_queue(struct qed_hwfn *p_hwfn,
        u32 count = QED_DB_REC_COUNT;
        u32 usage = 1;
 
+       /* Flush any pending (e)dpms as they may never arrive */
+       qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);
+
        /* wait for usage to zero or count to run out. This is necessary since
         * EDPM doorbell transactions can take multiple 64b cycles, and as such
         * can "split" over the pci. Possibly, the doorbell drop can happen with
@@ -406,51 +409,74 @@ static int qed_db_rec_flush_queue(struct qed_hwfn *p_hwfn,
 
 int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-       u32 overflow;
+       u32 attn_ovfl, cur_ovfl;
        int rc;
 
-       overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
-       DP_NOTICE(p_hwfn, "PF Overflow sticky 0x%x\n", overflow);
-       if (!overflow) {
-               qed_db_recovery_execute(p_hwfn, DB_REC_ONCE);
+       attn_ovfl = test_and_clear_bit(QED_OVERFLOW_BIT,
+                                      &p_hwfn->db_recovery_info.overflow);
+       cur_ovfl = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
+       if (!cur_ovfl && !attn_ovfl)
                return 0;
-       }
 
-       if (qed_edpm_enabled(p_hwfn)) {
+       DP_NOTICE(p_hwfn, "PF Overflow sticky: attn %u current %u\n",
+                 attn_ovfl, cur_ovfl);
+
+       if (cur_ovfl && !p_hwfn->db_bar_no_edpm) {
                rc = qed_db_rec_flush_queue(p_hwfn, p_ptt);
                if (rc)
                        return rc;
        }
 
-       /* Flush any pending (e)dpm as they may never arrive */
-       qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);
-
        /* Release overflow sticky indication (stop silently dropping everything) */
        qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);
 
        /* Repeat all last doorbells (doorbell drop recovery) */
-       qed_db_recovery_execute(p_hwfn, DB_REC_REAL_DEAL);
+       qed_db_recovery_execute(p_hwfn);
 
        return 0;
 }
 
-static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
+static void qed_dorq_attn_overflow(struct qed_hwfn *p_hwfn)
 {
-       u32 int_sts, first_drop_reason, details, address, all_drops_reason;
        struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt;
+       u32 overflow;
        int rc;
 
-       int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS);
-       DP_NOTICE(p_hwfn->cdev, "DORQ attention. int_sts was %x\n", int_sts);
+       overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
+       if (!overflow)
+               goto out;
+
+       /* Run PF doorbell recovery in next periodic handler */
+       set_bit(QED_OVERFLOW_BIT, &p_hwfn->db_recovery_info.overflow);
+
+       if (!p_hwfn->db_bar_no_edpm) {
+               rc = qed_db_rec_flush_queue(p_hwfn, p_ptt);
+               if (rc)
+                       goto out;
+       }
+
+       qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);
+out:
+       /* Schedule the handler even if overflow was not detected */
+       qed_periodic_db_rec_start(p_hwfn);
+}
+
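+/* Recovery flow, as wired up in this patch: the attention path above
+ * latches QED_OVERFLOW_BIT and schedules the periodic handler; the
+ * periodic qed_db_rec_handler() then test-and-clears that bit, re-checks
+ * the sticky register, and replays all registered doorbells via
+ * qed_db_recovery_execute().
+ */
+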
+static int qed_dorq_attn_int_sts(struct qed_hwfn *p_hwfn)
+{
+       u32 int_sts, first_drop_reason, details, address, all_drops_reason;
+       struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt;
 
        /* int_sts may be zero since all PFs were interrupted for doorbell
         * overflow but another one already handled it. We can abort here;
         * if this PF also requires overflow recovery we will be
         * interrupted again. The masked almost-full indication may also
         * be set; ignore it.
         */
+       int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS);
        if (!(int_sts & ~DORQ_REG_INT_STS_DORQ_FIFO_AFULL))
                return 0;
 
+       DP_NOTICE(p_hwfn->cdev, "DORQ attention. int_sts was %x\n", int_sts);
+
        /* check if db_drop or overflow happened */
        if (int_sts & (DORQ_REG_INT_STS_DB_DROP |
                       DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) {
@@ -477,11 +503,6 @@ static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
                          GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4,
                          first_drop_reason, all_drops_reason);
 
-               rc = qed_db_rec_handler(p_hwfn, p_ptt);
-               qed_periodic_db_rec_start(p_hwfn);
-               if (rc)
-                       return rc;
-
                /* Clear the doorbell drop details and prepare for next drop */
                qed_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0);
 
@@ -507,6 +528,25 @@ static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
        return -EINVAL;
 }
 
+static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
+{
+       p_hwfn->db_recovery_info.dorq_attn = true;
+       qed_dorq_attn_overflow(p_hwfn);
+
+       return qed_dorq_attn_int_sts(p_hwfn);
+}
+
+static void qed_dorq_attn_handler(struct qed_hwfn *p_hwfn)
+{
+       if (p_hwfn->db_recovery_info.dorq_attn)
+               goto out;
+
+       /* Call DORQ callback if the attention was missed */
+       qed_dorq_attn_cb(p_hwfn);
+out:
+       p_hwfn->db_recovery_info.dorq_attn = false;
+}
+
 /* Instead of major changes to the data-structure, we have some 'special'
  * identifiers for sources that changed meaning between adapters.
  */
@@ -1080,6 +1120,9 @@ static int qed_int_deassertion(struct qed_hwfn  *p_hwfn,
                }
        }
 
+       /* Handle missed DORQ attention */
+       qed_dorq_attn_handler(p_hwfn);
+
        /* Clear IGU indication for the deasserted bits */
        DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
                                    GTT_BAR0_MAP_REG_IGU_CMD +
index 1f356ed4f761e72486df4b57b1994e8f2dd89032..d473b522afc5137f69edece72c535397623ad05d 100644 (file)
@@ -192,8 +192,8 @@ void qed_int_disable_post_isr_release(struct qed_dev *cdev);
 
 /**
  * @brief - Doorbell Recovery handler.
- *          Run DB_REAL_DEAL doorbell recovery in case of PF overflow
- *          (and flush DORQ if needed), otherwise run DB_REC_ONCE.
+ *          Run doorbell recovery in case of PF overflow (and flush DORQ if
+ *          needed).
  *
  * @param p_hwfn
  * @param p_ptt
index f164d4acebcb43a4cd7b2858ad31e95e80467b74..6de23b56b2945c55118cbc3464a46881031583eb 100644 (file)
@@ -970,7 +970,7 @@ static void qed_update_pf_params(struct qed_dev *cdev,
        }
 }
 
-#define QED_PERIODIC_DB_REC_COUNT              100
+#define QED_PERIODIC_DB_REC_COUNT              10
 #define QED_PERIODIC_DB_REC_INTERVAL_MS                100
 #define QED_PERIODIC_DB_REC_INTERVAL \
        msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS)
index 9faaa6df78ed99b8b20b7f78b9efa9d4113b74e3..2f318aaf2b05d8145d4a0a4c45421fbff0bad455 100644 (file)
@@ -1591,7 +1591,7 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
                        p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
                } else {
                        DP_INFO(p_hwfn,
-                               "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's faspath HSI %02x.%02x\n",
+                               "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's fastpath HSI %02x.%02x\n",
                                vf->abs_vf_id,
                                req->vfdev_info.eth_fp_hsi_major,
                                req->vfdev_info.eth_fp_hsi_minor,
index c342b07e3a93767966e61465bd5814b960abb693..954015d2011a80c9564f9f320581507e53f30be0 100644 (file)
@@ -1665,12 +1665,12 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        txq->tx_db.data.bd_prod =
                cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
 
-       if (!skb->xmit_more || netif_xmit_stopped(netdev_txq))
+       if (!netdev_xmit_more() || netif_xmit_stopped(netdev_txq))
                qede_update_tx_producer(txq);
 
        if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
                      < (MAX_SKB_FRAGS + 1))) {
-               if (skb->xmit_more)
+               if (netdev_xmit_more())
                        qede_update_tx_producer(txq);
 
                netif_tx_stop_queue(netdev_txq);
index 5f3f42a25361679220fcc55224fcb2aa46adec03..bddb2b5982dcfedff2e8139741be978a1fc40e95 100644 (file)
@@ -490,18 +490,17 @@ int qede_ptp_enable(struct qede_dev *edev, bool init_tc)
 
        ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev);
        if (IS_ERR(ptp->clock)) {
-               rc = -EINVAL;
                DP_ERR(edev, "PTP clock registration failed\n");
+               qede_ptp_disable(edev);
+               rc = -EINVAL;
                goto err2;
        }
 
        return 0;
 
-err2:
-       qede_ptp_disable(edev);
-       ptp->clock = NULL;
 err1:
        kfree(ptp);
+err2:
        edev->ptp = NULL;
 
        return rc;
index 0c443ea98479ac0971a6e36c28bd8bde2f080bfa..374a4d4371f99f23fe9d4507e0cbd33e177fbd85 100644 (file)
@@ -497,7 +497,7 @@ struct qlcnic_hardware_context {
        u16 board_type;
        u16 supported_type;
 
-       u16 link_speed;
+       u32 link_speed;
        u16 link_duplex;
        u16 link_autoneg;
        u16 module_type;
index 04aa592f35c36f9782d891c65e2c0d810c9fd4cf..ad335bca3273dc4c1f29108116e8248952de5db8 100644 (file)
@@ -840,7 +840,7 @@ static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
        skb_tx_timestamp(skb);
 
        /* Trigger the MAC to check the TX descriptor */
-       if (!skb->xmit_more || netif_queue_stopped(dev))
+       if (!netdev_xmit_more() || netif_queue_stopped(dev))
                iowrite16(TM2TX, ioaddr + MTPR);
        lp->tx_insert_ptr = descptr->vndescp;
 
index a8ca26c2ae0c508664a2b03d32cbec67c017f0e1..efaea1a0ad64fb0fdabe9ed4be5ea0c07eb5f59c 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/firmware.h>
 #include <linux/prefetch.h>
+#include <linux/pci-aspm.h>
 #include <linux/ipv6.h>
 #include <net/ip6_checksum.h>
 
@@ -490,10 +491,6 @@ enum rtl_register_content {
        PCIDAC          = (1 << 4),
        PCIMulRW        = (1 << 3),
 #define INTT_MASK      GENMASK(1, 0)
-       INTT_0          = 0x0000,       // 8168
-       INTT_1          = 0x0001,       // 8168
-       INTT_2          = 0x0002,       // 8168
-       INTT_3          = 0x0003,       // 8168
 
        /* rtl8169_PHYstatus */
        TBI_Enable      = 0x80,
@@ -702,6 +699,8 @@ struct rtl8169_private {
        u32 ocp_base;
 };
 
+typedef void (*rtl_generic_fct)(struct rtl8169_private *tp);
+
 MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
 MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
 module_param_named(debug, debug.msg_enable, int, 0);
@@ -3991,131 +3990,65 @@ static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
 
 static void rtl_hw_phy_config(struct net_device *dev)
 {
+       static const rtl_generic_fct phy_configs[] = {
+               /* PCI devices. */
+               [RTL_GIGA_MAC_VER_01] = NULL,
+               [RTL_GIGA_MAC_VER_02] = rtl8169s_hw_phy_config,
+               [RTL_GIGA_MAC_VER_03] = rtl8169s_hw_phy_config,
+               [RTL_GIGA_MAC_VER_04] = rtl8169sb_hw_phy_config,
+               [RTL_GIGA_MAC_VER_05] = rtl8169scd_hw_phy_config,
+               [RTL_GIGA_MAC_VER_06] = rtl8169sce_hw_phy_config,
+               /* PCI-E devices. */
+               [RTL_GIGA_MAC_VER_07] = rtl8102e_hw_phy_config,
+               [RTL_GIGA_MAC_VER_08] = rtl8102e_hw_phy_config,
+               [RTL_GIGA_MAC_VER_09] = rtl8102e_hw_phy_config,
+               [RTL_GIGA_MAC_VER_10] = NULL,
+               [RTL_GIGA_MAC_VER_11] = rtl8168bb_hw_phy_config,
+               [RTL_GIGA_MAC_VER_12] = rtl8168bef_hw_phy_config,
+               [RTL_GIGA_MAC_VER_13] = NULL,
+               [RTL_GIGA_MAC_VER_14] = NULL,
+               [RTL_GIGA_MAC_VER_15] = NULL,
+               [RTL_GIGA_MAC_VER_16] = NULL,
+               [RTL_GIGA_MAC_VER_17] = rtl8168bef_hw_phy_config,
+               [RTL_GIGA_MAC_VER_18] = rtl8168cp_1_hw_phy_config,
+               [RTL_GIGA_MAC_VER_19] = rtl8168c_1_hw_phy_config,
+               [RTL_GIGA_MAC_VER_20] = rtl8168c_2_hw_phy_config,
+               [RTL_GIGA_MAC_VER_21] = rtl8168c_3_hw_phy_config,
+               [RTL_GIGA_MAC_VER_22] = rtl8168c_4_hw_phy_config,
+               [RTL_GIGA_MAC_VER_23] = rtl8168cp_2_hw_phy_config,
+               [RTL_GIGA_MAC_VER_24] = rtl8168cp_2_hw_phy_config,
+               [RTL_GIGA_MAC_VER_25] = rtl8168d_1_hw_phy_config,
+               [RTL_GIGA_MAC_VER_26] = rtl8168d_2_hw_phy_config,
+               [RTL_GIGA_MAC_VER_27] = rtl8168d_3_hw_phy_config,
+               [RTL_GIGA_MAC_VER_28] = rtl8168d_4_hw_phy_config,
+               [RTL_GIGA_MAC_VER_29] = rtl8105e_hw_phy_config,
+               [RTL_GIGA_MAC_VER_30] = rtl8105e_hw_phy_config,
+               [RTL_GIGA_MAC_VER_31] = NULL,
+               [RTL_GIGA_MAC_VER_32] = rtl8168e_1_hw_phy_config,
+               [RTL_GIGA_MAC_VER_33] = rtl8168e_1_hw_phy_config,
+               [RTL_GIGA_MAC_VER_34] = rtl8168e_2_hw_phy_config,
+               [RTL_GIGA_MAC_VER_35] = rtl8168f_1_hw_phy_config,
+               [RTL_GIGA_MAC_VER_36] = rtl8168f_2_hw_phy_config,
+               [RTL_GIGA_MAC_VER_37] = rtl8402_hw_phy_config,
+               [RTL_GIGA_MAC_VER_38] = rtl8411_hw_phy_config,
+               [RTL_GIGA_MAC_VER_39] = rtl8106e_hw_phy_config,
+               [RTL_GIGA_MAC_VER_40] = rtl8168g_1_hw_phy_config,
+               [RTL_GIGA_MAC_VER_41] = NULL,
+               [RTL_GIGA_MAC_VER_42] = rtl8168g_2_hw_phy_config,
+               [RTL_GIGA_MAC_VER_43] = rtl8168g_2_hw_phy_config,
+               [RTL_GIGA_MAC_VER_44] = rtl8168g_2_hw_phy_config,
+               [RTL_GIGA_MAC_VER_45] = rtl8168h_1_hw_phy_config,
+               [RTL_GIGA_MAC_VER_46] = rtl8168h_2_hw_phy_config,
+               [RTL_GIGA_MAC_VER_47] = rtl8168h_1_hw_phy_config,
+               [RTL_GIGA_MAC_VER_48] = rtl8168h_2_hw_phy_config,
+               [RTL_GIGA_MAC_VER_49] = rtl8168ep_1_hw_phy_config,
+               [RTL_GIGA_MAC_VER_50] = rtl8168ep_2_hw_phy_config,
+               [RTL_GIGA_MAC_VER_51] = rtl8168ep_2_hw_phy_config,
+       };
        struct rtl8169_private *tp = netdev_priv(dev);
 
-       switch (tp->mac_version) {
-       case RTL_GIGA_MAC_VER_01:
-               break;
-       case RTL_GIGA_MAC_VER_02:
-       case RTL_GIGA_MAC_VER_03:
-               rtl8169s_hw_phy_config(tp);
-               break;
-       case RTL_GIGA_MAC_VER_04:
-               rtl8169sb_hw_phy_config(tp);
-               break;
-       case RTL_GIGA_MAC_VER_05:
-               rtl8169scd_hw_phy_config(tp);
-               break;
-       case RTL_GIGA_MAC_VER_06:
-               rtl8169sce_hw_phy_config(tp);
-               break;
-       case RTL_GIGA_MAC_VER_07:
-       case RTL_GIGA_MAC_VER_08:
-       case RTL_GIGA_MAC_VER_09:
-               rtl8102e_hw_phy_config(tp);
-               break;
-       case RTL_GIGA_MAC_VER_11:
-               rtl8168bb_hw_phy_config(tp);
-               break;
-       case RTL_GIGA_MAC_VER_12:
-               rtl8168bef_hw_phy_config(tp);
-               break;
-       case RTL_GIGA_MAC_VER_17:
-               rtl8168bef_hw_phy_config(tp);
-               break;
-       case RTL_GIGA_MAC_VER_18:
-               rtl8168cp_1_hw_phy_config(tp);
-               break;
-       case RTL_GIGA_MAC_VER_19:
-               rtl8168c_1_hw_phy_config(tp);
-               break;
-       case RTL_GIGA_MAC_VER_20:
-               rtl8168c_2_hw_phy_config(tp);
-               break;
-       case RTL_GIGA_MAC_VER_21:
-               rtl8168c_3_hw_phy_config(tp);
-               break;
-       case RTL_GIGA_MAC_VER_22:
-               rtl8168c_4_hw_phy_config(tp);
-               break;
-       case RTL_GIGA_MAC_VER_23:
-       case RTL_GIGA_MAC_VER_24:
-               rtl8168cp_2_hw_phy_config(tp);
-               break;
-       case RTL_GIGA_MAC_VER_25:
-               rtl8168d_1_hw_phy_config(tp);
-               break;
-       case RTL_GIGA_MAC_VER_26:
-               rtl8168d_2_hw_phy_config(tp);
-               break;
-       case RTL_GIGA_MAC_VER_27:
-               rtl8168d_3_hw_phy_config(tp);
-               break;
-       case RTL_GIGA_MAC_VER_28:
-               rtl8168d_4_hw_phy_config(tp);
-               break;
-       case RTL_GIGA_MAC_VER_29:
-       case RTL_GIGA_MAC_VER_30:
-               rtl8105e_hw_phy_config(tp);
-               break;
-       case RTL_GIGA_MAC_VER_31:
-               /* None. */
-               break;
-       case RTL_GIGA_MAC_VER_32:
-       case RTL_GIGA_MAC_VER_33:
-               rtl8168e_1_hw_phy_config(tp);
-               break;
-       case RTL_GIGA_MAC_VER_34:
-               rtl8168e_2_hw_phy_config(tp);
-               break;
-       case RTL_GIGA_MAC_VER_35:
-               rtl8168f_1_hw_phy_config(tp);
-               break;
-       case RTL_GIGA_MAC_VER_36:
-               rtl8168f_2_hw_phy_config(tp);
-               break;
-
-       case RTL_GIGA_MAC_VER_37:
-               rtl8402_hw_phy_config(tp);
-               break;
-
-       case RTL_GIGA_MAC_VER_38:
-               rtl8411_hw_phy_config(tp);
-               break;
-
-       case RTL_GIGA_MAC_VER_39:
-               rtl8106e_hw_phy_config(tp);
-               break;
-
-       case RTL_GIGA_MAC_VER_40:
-               rtl8168g_1_hw_phy_config(tp);
-               break;
-       case RTL_GIGA_MAC_VER_42:
-       case RTL_GIGA_MAC_VER_43:
-       case RTL_GIGA_MAC_VER_44:
-               rtl8168g_2_hw_phy_config(tp);
-               break;
-       case RTL_GIGA_MAC_VER_45:
-       case RTL_GIGA_MAC_VER_47:
-               rtl8168h_1_hw_phy_config(tp);
-               break;
-       case RTL_GIGA_MAC_VER_46:
-       case RTL_GIGA_MAC_VER_48:
-               rtl8168h_2_hw_phy_config(tp);
-               break;
-
-       case RTL_GIGA_MAC_VER_49:
-               rtl8168ep_1_hw_phy_config(tp);
-               break;
-       case RTL_GIGA_MAC_VER_50:
-       case RTL_GIGA_MAC_VER_51:
-               rtl8168ep_2_hw_phy_config(tp);
-               break;
-
-       case RTL_GIGA_MAC_VER_41:
-       default:
-               break;
-       }
+       if (phy_configs[tp->mac_version])
+               phy_configs[tp->mac_version](tp);
 }
 
 static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
@@ -4702,6 +4635,8 @@ static void rtl_hw_start(struct rtl8169_private *tp)
        rtl_set_rx_tx_desc_registers(tp);
        rtl_lock_config_regs(tp);
 
+       /* disable interrupt coalescing */
+       RTL_W16(tp, IntrMitigate, 0x0000);
        /* Initially a 10 us delay. Turned it into a PCI commit. - FR */
        RTL_R8(tp, IntrMask);
        RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
@@ -4734,12 +4669,6 @@ static void rtl_hw_start_8169(struct rtl8169_private *tp)
 
        rtl8169_set_magic_reg(tp, tp->mac_version);
 
-       /*
-        * Undocumented corner. Supposedly:
-        * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
-        */
-       RTL_W16(tp, IntrMitigate, 0x0000);
-
        RTL_W32(tp, RxMissed, 0);
 }
 
@@ -5452,128 +5381,6 @@ static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)
        rtl_hw_aspm_clkreq_enable(tp, true);
 }
 
-static void rtl_hw_start_8168(struct rtl8169_private *tp)
-{
-       RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
-
-       tp->cp_cmd &= ~INTT_MASK;
-       tp->cp_cmd |= PktCntrDisable | INTT_1;
-       RTL_W16(tp, CPlusCmd, tp->cp_cmd);
-
-       RTL_W16(tp, IntrMitigate, 0x5151);
-
-       /* Work around for RxFIFO overflow. */
-       if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
-               tp->irq_mask |= RxFIFOOver;
-               tp->irq_mask &= ~RxOverflow;
-       }
-
-       switch (tp->mac_version) {
-       case RTL_GIGA_MAC_VER_11:
-               rtl_hw_start_8168bb(tp);
-               break;
-
-       case RTL_GIGA_MAC_VER_12:
-       case RTL_GIGA_MAC_VER_17:
-               rtl_hw_start_8168bef(tp);
-               break;
-
-       case RTL_GIGA_MAC_VER_18:
-               rtl_hw_start_8168cp_1(tp);
-               break;
-
-       case RTL_GIGA_MAC_VER_19:
-               rtl_hw_start_8168c_1(tp);
-               break;
-
-       case RTL_GIGA_MAC_VER_20:
-               rtl_hw_start_8168c_2(tp);
-               break;
-
-       case RTL_GIGA_MAC_VER_21:
-               rtl_hw_start_8168c_3(tp);
-               break;
-
-       case RTL_GIGA_MAC_VER_22:
-               rtl_hw_start_8168c_4(tp);
-               break;
-
-       case RTL_GIGA_MAC_VER_23:
-               rtl_hw_start_8168cp_2(tp);
-               break;
-
-       case RTL_GIGA_MAC_VER_24:
-               rtl_hw_start_8168cp_3(tp);
-               break;
-
-       case RTL_GIGA_MAC_VER_25:
-       case RTL_GIGA_MAC_VER_26:
-       case RTL_GIGA_MAC_VER_27:
-               rtl_hw_start_8168d(tp);
-               break;
-
-       case RTL_GIGA_MAC_VER_28:
-               rtl_hw_start_8168d_4(tp);
-               break;
-
-       case RTL_GIGA_MAC_VER_31:
-               rtl_hw_start_8168dp(tp);
-               break;
-
-       case RTL_GIGA_MAC_VER_32:
-       case RTL_GIGA_MAC_VER_33:
-               rtl_hw_start_8168e_1(tp);
-               break;
-       case RTL_GIGA_MAC_VER_34:
-               rtl_hw_start_8168e_2(tp);
-               break;
-
-       case RTL_GIGA_MAC_VER_35:
-       case RTL_GIGA_MAC_VER_36:
-               rtl_hw_start_8168f_1(tp);
-               break;
-
-       case RTL_GIGA_MAC_VER_38:
-               rtl_hw_start_8411(tp);
-               break;
-
-       case RTL_GIGA_MAC_VER_40:
-       case RTL_GIGA_MAC_VER_41:
-               rtl_hw_start_8168g_1(tp);
-               break;
-       case RTL_GIGA_MAC_VER_42:
-               rtl_hw_start_8168g_2(tp);
-               break;
-
-       case RTL_GIGA_MAC_VER_44:
-               rtl_hw_start_8411_2(tp);
-               break;
-
-       case RTL_GIGA_MAC_VER_45:
-       case RTL_GIGA_MAC_VER_46:
-               rtl_hw_start_8168h_1(tp);
-               break;
-
-       case RTL_GIGA_MAC_VER_49:
-               rtl_hw_start_8168ep_1(tp);
-               break;
-
-       case RTL_GIGA_MAC_VER_50:
-               rtl_hw_start_8168ep_2(tp);
-               break;
-
-       case RTL_GIGA_MAC_VER_51:
-               rtl_hw_start_8168ep_3(tp);
-               break;
-
-       default:
-               netif_err(tp, drv, tp->dev,
-                         "unknown chipset (mac_version = %d)\n",
-                         tp->mac_version);
-               break;
-       }
-}
-
 static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
 {
        static const struct ephy_info e_info_8102e_1[] = {
@@ -5699,6 +5506,73 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
        rtl_hw_aspm_clkreq_enable(tp, true);
 }
 
+static void rtl_hw_config(struct rtl8169_private *tp)
+{
+       static const rtl_generic_fct hw_configs[] = {
+               [RTL_GIGA_MAC_VER_07] = rtl_hw_start_8102e_1,
+               [RTL_GIGA_MAC_VER_08] = rtl_hw_start_8102e_3,
+               [RTL_GIGA_MAC_VER_09] = rtl_hw_start_8102e_2,
+               [RTL_GIGA_MAC_VER_10] = NULL,
+               [RTL_GIGA_MAC_VER_11] = rtl_hw_start_8168bb,
+               [RTL_GIGA_MAC_VER_12] = rtl_hw_start_8168bef,
+               [RTL_GIGA_MAC_VER_13] = NULL,
+               [RTL_GIGA_MAC_VER_14] = NULL,
+               [RTL_GIGA_MAC_VER_15] = NULL,
+               [RTL_GIGA_MAC_VER_16] = NULL,
+               [RTL_GIGA_MAC_VER_17] = rtl_hw_start_8168bef,
+               [RTL_GIGA_MAC_VER_18] = rtl_hw_start_8168cp_1,
+               [RTL_GIGA_MAC_VER_19] = rtl_hw_start_8168c_1,
+               [RTL_GIGA_MAC_VER_20] = rtl_hw_start_8168c_2,
+               [RTL_GIGA_MAC_VER_21] = rtl_hw_start_8168c_3,
+               [RTL_GIGA_MAC_VER_22] = rtl_hw_start_8168c_4,
+               [RTL_GIGA_MAC_VER_23] = rtl_hw_start_8168cp_2,
+               [RTL_GIGA_MAC_VER_24] = rtl_hw_start_8168cp_3,
+               [RTL_GIGA_MAC_VER_25] = rtl_hw_start_8168d,
+               [RTL_GIGA_MAC_VER_26] = rtl_hw_start_8168d,
+               [RTL_GIGA_MAC_VER_27] = rtl_hw_start_8168d,
+               [RTL_GIGA_MAC_VER_28] = rtl_hw_start_8168d_4,
+               [RTL_GIGA_MAC_VER_29] = rtl_hw_start_8105e_1,
+               [RTL_GIGA_MAC_VER_30] = rtl_hw_start_8105e_2,
+               [RTL_GIGA_MAC_VER_31] = rtl_hw_start_8168dp,
+               [RTL_GIGA_MAC_VER_32] = rtl_hw_start_8168e_1,
+               [RTL_GIGA_MAC_VER_33] = rtl_hw_start_8168e_1,
+               [RTL_GIGA_MAC_VER_34] = rtl_hw_start_8168e_2,
+               [RTL_GIGA_MAC_VER_35] = rtl_hw_start_8168f_1,
+               [RTL_GIGA_MAC_VER_36] = rtl_hw_start_8168f_1,
+               [RTL_GIGA_MAC_VER_37] = rtl_hw_start_8402,
+               [RTL_GIGA_MAC_VER_38] = rtl_hw_start_8411,
+               [RTL_GIGA_MAC_VER_39] = rtl_hw_start_8106,
+               [RTL_GIGA_MAC_VER_40] = rtl_hw_start_8168g_1,
+               [RTL_GIGA_MAC_VER_41] = rtl_hw_start_8168g_1,
+               [RTL_GIGA_MAC_VER_42] = rtl_hw_start_8168g_2,
+               [RTL_GIGA_MAC_VER_43] = rtl_hw_start_8168g_2,
+               [RTL_GIGA_MAC_VER_44] = rtl_hw_start_8411_2,
+               [RTL_GIGA_MAC_VER_45] = rtl_hw_start_8168h_1,
+               [RTL_GIGA_MAC_VER_46] = rtl_hw_start_8168h_1,
+               [RTL_GIGA_MAC_VER_47] = rtl_hw_start_8168h_1,
+               [RTL_GIGA_MAC_VER_48] = rtl_hw_start_8168h_1,
+               [RTL_GIGA_MAC_VER_49] = rtl_hw_start_8168ep_1,
+               [RTL_GIGA_MAC_VER_50] = rtl_hw_start_8168ep_2,
+               [RTL_GIGA_MAC_VER_51] = rtl_hw_start_8168ep_3,
+       };
+
+       if (hw_configs[tp->mac_version])
+               hw_configs[tp->mac_version](tp);
+}
+
+static void rtl_hw_start_8168(struct rtl8169_private *tp)
+{
+       RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
+
+       /* Workaround for RxFIFO overflow. */
+       if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
+               tp->irq_mask |= RxFIFOOver;
+               tp->irq_mask &= ~RxOverflow;
+       }
+
+       rtl_hw_config(tp);
+}
+
 static void rtl_hw_start_8101(struct rtl8169_private *tp)
 {
        if (tp->mac_version >= RTL_GIGA_MAC_VER_30)
@@ -5714,43 +5588,7 @@ static void rtl_hw_start_8101(struct rtl8169_private *tp)
        tp->cp_cmd &= CPCMD_QUIRK_MASK;
        RTL_W16(tp, CPlusCmd, tp->cp_cmd);
 
-       switch (tp->mac_version) {
-       case RTL_GIGA_MAC_VER_07:
-               rtl_hw_start_8102e_1(tp);
-               break;
-
-       case RTL_GIGA_MAC_VER_08:
-               rtl_hw_start_8102e_3(tp);
-               break;
-
-       case RTL_GIGA_MAC_VER_09:
-               rtl_hw_start_8102e_2(tp);
-               break;
-
-       case RTL_GIGA_MAC_VER_29:
-               rtl_hw_start_8105e_1(tp);
-               break;
-       case RTL_GIGA_MAC_VER_30:
-               rtl_hw_start_8105e_2(tp);
-               break;
-
-       case RTL_GIGA_MAC_VER_37:
-               rtl_hw_start_8402(tp);
-               break;
-
-       case RTL_GIGA_MAC_VER_39:
-               rtl_hw_start_8106(tp);
-               break;
-       case RTL_GIGA_MAC_VER_43:
-               rtl_hw_start_8168g_2(tp);
-               break;
-       case RTL_GIGA_MAC_VER_47:
-       case RTL_GIGA_MAC_VER_48:
-               rtl_hw_start_8168h_1(tp);
-               break;
-       }
-
-       RTL_W16(tp, IntrMitigate, 0x0000);
+       rtl_hw_config(tp);
 }
 
 static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
@@ -6542,10 +6380,8 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
                set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags);
        }
 
-       if (status & (RTL_EVENT_NAPI | LinkChg)) {
-               rtl_irq_disable(tp);
-               napi_schedule_irqoff(&tp->napi);
-       }
+       rtl_irq_disable(tp);
+       napi_schedule_irqoff(&tp->napi);
 out:
        rtl_ack_events(tp, status);
 
@@ -7352,6 +7188,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (rc)
                return rc;
 
+       /* Disable ASPM completely as it causes random device stop-working
+        * problems as well as full system hangs for some users of PCIe
+        * devices.
+        */
+       pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
+
        /* enable device (incl. PCI PM wakeup and hotplug setup) */
        rc = pcim_enable_device(pdev);
        if (rc < 0) {
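
The r8169 rework above replaces two long per-chip switch statements with arrays of function pointers indexed by mac_version (the rtl_generic_fct typedef introduced earlier), so dispatch becomes a table lookup plus a NULL check for versions that need no hook. The pattern in miniature, with hypothetical names:

typedef void (*hw_hook)(struct chip *cp);

static const hw_hook hooks[] = {
        [CHIP_VER_1] = setup_ver1,
        [CHIP_VER_2] = NULL,            /* nothing to do for this version */
        [CHIP_VER_3] = setup_ver3,      /* unlisted entries are NULL too */
};

static void run_hook(struct chip *cp)
{
        if (hooks[cp->ver])
                hooks[cp->ver](cp);
}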
index a71c900ca04f50396928d7a441cb57f429cf6737..7ae6c124bfe92ae360d754cf679ef4a166aa374d 100644 (file)
@@ -2207,6 +2207,15 @@ static int rocker_router_fib_event(struct notifier_block *nb,
        switch (event) {
        case FIB_EVENT_ENTRY_ADD: /* fall through */
        case FIB_EVENT_ENTRY_DEL:
+               if (info->family == AF_INET) {
+                       struct fib_entry_notifier_info *fen_info = ptr;
+
+                       if (fen_info->fi->fib_nh_is_v6) {
+                               NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported");
+                               return notifier_from_errno(-EINVAL);
+                       }
+               }
+
                memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info));
                /* Take reference on fib_info to prevent it from being
                 * freed while work is queued. Release it afterwards.
index 3409bbf5b19fffbc5ec3538e592173f854eb8791..c5059f456f374efe4baff7b64894de70df1bfdc1 100644 (file)
@@ -321,7 +321,7 @@ netdev_tx_t ef4_enqueue_skb(struct ef4_tx_queue *tx_queue, struct sk_buff *skb)
        netdev_tx_sent_queue(tx_queue->core_txq, skb_len);
 
        /* Pass off to hardware */
-       if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
+       if (!netdev_xmit_more() || netif_xmit_stopped(tx_queue->core_txq)) {
                struct ef4_tx_queue *txq2 = ef4_tx_queue_partner(tx_queue);
 
                /* There could be packets left on the partner queue if those
@@ -333,7 +333,7 @@ netdev_tx_t ef4_enqueue_skb(struct ef4_tx_queue *tx_queue, struct sk_buff *skb)
 
                ef4_nic_push_buffers(tx_queue);
        } else {
-               tx_queue->xmit_more_available = skb->xmit_more;
+               tx_queue->xmit_more_available = netdev_xmit_more();
        }
 
        tx_queue->tx_packets++;
index 06c8f282263f7e23b63094dfb95870794013fa85..e182055ec2ebad88802b104540170f6e8936c4a6 100644 (file)
@@ -478,8 +478,6 @@ static int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue,
                next = skb->next;
                skb->next = NULL;
 
-               if (next)
-                       skb->xmit_more = true;
                efx_enqueue_skb(tx_queue, skb);
                skb = next;
        }
@@ -506,7 +504,7 @@ static int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue,
 netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 {
        unsigned int old_insert_count = tx_queue->insert_count;
-       bool xmit_more = skb->xmit_more;
+       bool xmit_more = netdev_xmit_more();
        bool data_mapped = false;
        unsigned int segments;
        unsigned int skb_len;
@@ -533,7 +531,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
                if (rc)
                        goto err;
 #ifdef EFX_USE_PIO
-       } else if (skb_len <= efx_piobuf_size && !skb->xmit_more &&
+       } else if (skb_len <= efx_piobuf_size && !xmit_more &&
                   efx_nic_may_tx_pio(tx_queue)) {
                /* Use PIO for short packets with an empty queue. */
                if (efx_enqueue_skb_pio(tx_queue, skb))
@@ -559,8 +557,8 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
        if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more)) {
                struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
 
-               /* There could be packets left on the partner queue if those
-                * SKBs had skb->xmit_more set. If we do not push those they
+               /* There could be packets left on the partner queue if
+                * xmit_more was set. If we do not push those they
                 * could be left for a long time and cause a netdev watchdog.
                 */
                if (txq2->xmit_more_available)
@@ -568,7 +566,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 
                efx_nic_push_buffers(tx_queue);
        } else {
-               tx_queue->xmit_more_available = skb->xmit_more;
+               tx_queue->xmit_more_available = xmit_more;
        }
 
        if (segments) {
index 40d6356a7e73c213f0d1d073387b8605bb4f3726..3dfb07a78952533420da7cb1cb6b87b171d91661 100644 (file)
 /* Specific functions used for Ring mode */
 
 /* Enhanced descriptors */
-static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end)
+static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end,
+                                          int bfsize)
 {
-       p->des1 |= cpu_to_le32((BUF_SIZE_8KiB
-                       << ERDES1_BUFFER2_SIZE_SHIFT)
-                  & ERDES1_BUFFER2_SIZE_MASK);
+       if (bfsize == BUF_SIZE_16KiB)
+               p->des1 |= cpu_to_le32((BUF_SIZE_8KiB
+                               << ERDES1_BUFFER2_SIZE_SHIFT)
+                          & ERDES1_BUFFER2_SIZE_MASK);
 
        if (end)
                p->des1 |= cpu_to_le32(ERDES1_END_RING);
@@ -59,11 +61,15 @@ static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
 }
 
 /* Normal descriptors */
-static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end)
+static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end, int bfsize)
 {
-       p->des1 |= cpu_to_le32(((BUF_SIZE_2KiB - 1)
-                               << RDES1_BUFFER2_SIZE_SHIFT)
-                   & RDES1_BUFFER2_SIZE_MASK);
+       if (bfsize >= BUF_SIZE_2KiB) {
+               int bfsize2;
+
+               bfsize2 = min(bfsize - BUF_SIZE_2KiB + 1, BUF_SIZE_2KiB - 1);
+               p->des1 |= cpu_to_le32((bfsize2 << RDES1_BUFFER2_SIZE_SHIFT)
+                           & RDES1_BUFFER2_SIZE_MASK);
+       }
 
        if (end)
                p->des1 |= cpu_to_le32(RDES1_END_RING);
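
These ring-mode helpers now take the configured DMA buffer size instead of hardcoding the maximum: a buffer larger than what the buffer 1 size field can describe is split across the two des1 fields, and buffer 2 is programmed only when the buffer actually extends past buffer 1 (for enhanced descriptors, only the 16 KiB case spills into the 8 KiB buffer 2). Worked numbers for the normal-descriptor split, matching the min() logic above:

        int bfsize  = 3000;                             /* example buffer size */
        int bfsize1 = min(bfsize, BUF_SIZE_2KiB - 1);   /* 2047 */
        int bfsize2 = min(bfsize - BUF_SIZE_2KiB + 1,
                          BUF_SIZE_2KiB - 1);           /* 953; 2047 + 953 == 3000 */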
index 7fbb6a4dbf5107723f16b825e7a3b1577c97254a..e061e9f5fad71f065440605646da812d1b9daf51 100644 (file)
@@ -296,7 +296,7 @@ static int dwmac4_wrback_get_rx_timestamp_status(void *desc, void *next_desc,
 }
 
 static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
-                                  int mode, int end)
+                                  int mode, int end, int bfsize)
 {
        dwmac4_set_rx_owner(p, disable_rx_ic);
 }
index 37d5e6fe74737f6c82acc223e2c93b043b7bb9f4..085b700a4994e599ec5ff2044c690a614f5ac4e6 100644 (file)
 #define XGMAC_RSF                      BIT(5)
 #define XGMAC_RTC                      GENMASK(1, 0)
 #define XGMAC_RTC_SHIFT                        0
+#define XGMAC_MTL_RXQ_FLOW_CONTROL(x)  (0x00001150 + (0x80 * (x)))
+#define XGMAC_RFD                      GENMASK(31, 17)
+#define XGMAC_RFD_SHIFT                        17
+#define XGMAC_RFA                      GENMASK(15, 1)
+#define XGMAC_RFA_SHIFT                        1
 #define XGMAC_MTL_QINTEN(x)            (0x00001170 + (0x80 * (x)))
 #define XGMAC_RXOIE                    BIT(16)
 #define XGMAC_MTL_QINT_STATUS(x)       (0x00001174 + (0x80 * (x)))
index 1d858fdec99718ec63a5fa1064fe9dd99670d2e0..98fa471da7c0f2764729f98c7044f52e068c1db9 100644 (file)
@@ -123,7 +123,7 @@ static int dwxgmac2_get_rx_timestamp_status(void *desc, void *next_desc,
 }
 
 static void dwxgmac2_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
-                                 int mode, int end)
+                                 int mode, int end, int bfsize)
 {
        dwxgmac2_set_rx_owner(p, disable_rx_ic);
 }
index 2ba712b48a89ab3f71997666845b1804ef450e88..e79037f511e16c6d8bc7085c1612e3f5b0d81912 100644 (file)
@@ -147,6 +147,52 @@ static void dwxgmac2_dma_rx_mode(void __iomem *ioaddr, int mode,
        value &= ~XGMAC_RQS;
        value |= (rqs << XGMAC_RQS_SHIFT) & XGMAC_RQS;
 
+       if ((fifosz >= 4096) && (qmode != MTL_QUEUE_AVB)) {
+               u32 flow = readl(ioaddr + XGMAC_MTL_RXQ_FLOW_CONTROL(channel));
+               unsigned int rfd, rfa;
+
+               value |= XGMAC_EHFC;
+
+               /* Set Threshold for Activating Flow Control to min 2 frames,
+                * i.e. 1500 * 2 = 3000 bytes.
+                *
+                * Set Threshold for Deactivating Flow Control to min 1 frame,
+                * i.e. 1500 bytes.
+                */
+               switch (fifosz) {
+               case 4096:
+                       /* This violates the above formula because of the FIFO
+                        * size limit, so overflow may still occur despite it.
+                        */
+                       rfd = 0x03; /* Full-2.5K */
+                       rfa = 0x01; /* Full-1.5K */
+                       break;
+
+               case 8192:
+                       rfd = 0x06; /* Full-4K */
+                       rfa = 0x0a; /* Full-6K */
+                       break;
+
+               case 16384:
+                       rfd = 0x06; /* Full-4K */
+                       rfa = 0x12; /* Full-10K */
+                       break;
+
+               default:
+                       rfd = 0x06; /* Full-4K */
+                       rfa = 0x1e; /* Full-16K */
+                       break;
+               }
+
+               flow &= ~XGMAC_RFD;
+               flow |= rfd << XGMAC_RFD_SHIFT;
+
+               flow &= ~XGMAC_RFA;
+               flow |= rfa << XGMAC_RFA_SHIFT;
+
+               writel(flow, ioaddr + XGMAC_MTL_RXQ_FLOW_CONTROL(channel));
+       }
+
        writel(value, ioaddr + XGMAC_MTL_RXQ_OPMODE(channel));
 
        /* Enable MTL RX overflow */
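
The RFA/RFD values written above appear to follow the Synopsys MTL threshold encoding suggested by the comments: a field value N means the threshold sits 1 KiB + N x 0.5 KiB below FIFO-full, which reproduces every pair here (0x06 -> Full-4K, 0x12 -> Full-10K, 0x1e -> Full-16K). A hedged helper under that assumption; verify against the databook before relying on it:

        /* Distance-from-full in bytes -> RFA/RFD field value, assuming the
         * "1 KiB + N * 0.5 KiB" encoding inferred from the comments above.
         */
        static u32 xgmac_flow_thresh_field(unsigned int bytes_below_full)
        {
                return (bytes_below_full - 1024) / 512;
        }

        /* xgmac_flow_thresh_field(4096)  == 0x06 ("Full-4K")  */
        /* xgmac_flow_thresh_field(10240) == 0x12 ("Full-10K") */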
index 5ef91a790f9d16fbd122f71e130cf7ecf5249a68..5202d6ad79194b0ed9134a7905d0aaa4309c6822 100644 (file)
@@ -201,6 +201,11 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
        if (unlikely(rdes0 & RDES0_OWN))
                return dma_own;
 
+       if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
+               stats->rx_length_errors++;
+               return discard_frame;
+       }
+
        if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) {
                if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) {
                        x->rx_desc++;
@@ -231,9 +236,10 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
         * It doesn't match the information reported in the databook.
         * At any rate, we need to understand if the CSUM hw computation is ok
         * and report this info to the upper layers. */
-       ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
-                                !!(rdes0 & RDES0_FRAME_TYPE),
-                                !!(rdes0 & ERDES0_RX_MAC_ADDR));
+       if (likely(ret == good_frame))
+               ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
+                                        !!(rdes0 & RDES0_FRAME_TYPE),
+                                        !!(rdes0 & ERDES0_RX_MAC_ADDR));
 
        if (unlikely(rdes0 & RDES0_DRIBBLING))
                x->dribbling_bit++;
@@ -259,15 +265,19 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
 }
 
 static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
-                                 int mode, int end)
+                                 int mode, int end, int bfsize)
 {
+       int bfsize1;
+
        p->des0 |= cpu_to_le32(RDES0_OWN);
-       p->des1 |= cpu_to_le32(BUF_SIZE_8KiB & ERDES1_BUFFER1_SIZE_MASK);
+
+       bfsize1 = min(bfsize, BUF_SIZE_8KiB);
+       p->des1 |= cpu_to_le32(bfsize1 & ERDES1_BUFFER1_SIZE_MASK);
 
        if (mode == STMMAC_CHAIN_MODE)
                ehn_desc_rx_set_on_chain(p);
        else
-               ehn_desc_rx_set_on_ring(p, end);
+               ehn_desc_rx_set_on_ring(p, end, bfsize);
 
        if (disable_rx_ic)
                p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC);
index 92b8944f26e3c8566d9e68de4820d06231ca10bd..5bb00234d961c6a5a2385c90bc3fdf54ff96e4ca 100644 (file)
@@ -33,7 +33,7 @@ struct dma_extended_desc;
 struct stmmac_desc_ops {
        /* DMA RX descriptor ring initialization */
        void (*init_rx_desc)(struct dma_desc *p, int disable_rx_ic, int mode,
-                       int end);
+                       int end, int bfsize);
        /* DMA TX descriptor ring initialization */
        void (*init_tx_desc)(struct dma_desc *p, int mode, int end);
        /* Invoked by the xmit function to prepare the tx descriptor */
index de65bb29feba967cc7a0d6ff3184998f359dde34..b7dd4e3c760d82da1439fb0d8dd9d418c34256a2 100644 (file)
@@ -91,8 +91,6 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
                return dma_own;
 
        if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
-               pr_warn("%s: Oversized frame spanned multiple buffers\n",
-                       __func__);
                stats->rx_length_errors++;
                return discard_frame;
        }
@@ -135,15 +133,19 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
 }
 
 static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
-                              int end)
+                              int end, int bfsize)
 {
+       int bfsize1;
+
        p->des0 |= cpu_to_le32(RDES0_OWN);
-       p->des1 |= cpu_to_le32((BUF_SIZE_2KiB - 1) & RDES1_BUFFER1_SIZE_MASK);
+
+       bfsize1 = min(bfsize, BUF_SIZE_2KiB - 1);
+       p->des1 |= cpu_to_le32(bfsize1 & RDES1_BUFFER1_SIZE_MASK);
 
        if (mode == STMMAC_CHAIN_MODE)
                ndesc_rx_set_on_chain(p, end);
        else
-               ndesc_rx_set_on_ring(p, end);
+               ndesc_rx_set_on_ring(p, end, bfsize);
 
        if (disable_rx_ic)
                p->des1 |= cpu_to_le32(RDES1_DISABLE_IC);
index 6a2e1031a62ae3c4d16f7f09f4d09481ccfa325d..7a895a2889e306db9457045e766a26a87f5ae5ac 100644 (file)
@@ -74,7 +74,7 @@ MODULE_PARM_DESC(phyaddr, "Physical device address");
 #define STMMAC_TX_THRESH       (DMA_TX_SIZE / 4)
 #define STMMAC_RX_THRESH       (DMA_RX_SIZE / 4)
 
-static int flow_ctrl = FLOW_OFF;
+static int flow_ctrl = FLOW_AUTO;
 module_param(flow_ctrl, int, 0644);
 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
 
@@ -1136,11 +1136,13 @@ static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
                if (priv->extend_desc)
                        stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
                                        priv->use_riwt, priv->mode,
-                                       (i == DMA_RX_SIZE - 1));
+                                       (i == DMA_RX_SIZE - 1),
+                                       priv->dma_buf_sz);
                else
                        stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
                                        priv->use_riwt, priv->mode,
-                                       (i == DMA_RX_SIZE - 1));
+                                       (i == DMA_RX_SIZE - 1),
+                                       priv->dma_buf_sz);
 }
 
 /**
@@ -3352,9 +3354,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 {
        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
        struct stmmac_channel *ch = &priv->channel[queue];
-       unsigned int entry = rx_q->cur_rx;
+       unsigned int next_entry = rx_q->cur_rx;
        int coe = priv->hw->rx_csum;
-       unsigned int next_entry;
        unsigned int count = 0;
        bool xmac;
 
@@ -3372,10 +3373,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
                stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
        }
        while (count < limit) {
-               int status;
+               int entry, status;
                struct dma_desc *p;
                struct dma_desc *np;
 
+               entry = next_entry;
+
                if (priv->extend_desc)
                        p = (struct dma_desc *)(rx_q->dma_erx + entry);
                else
@@ -3431,11 +3434,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
                         *  ignored
                         */
                        if (frame_len > priv->dma_buf_sz) {
-                               netdev_err(priv->dev,
-                                          "len %d larger than size (%d)\n",
-                                          frame_len, priv->dma_buf_sz);
+                               if (net_ratelimit())
+                                       netdev_err(priv->dev,
+                                                  "len %d larger than size (%d)\n",
+                                                  frame_len, priv->dma_buf_sz);
                                priv->dev->stats.rx_length_errors++;
-                               break;
+                               continue;
                        }
 
                        /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
@@ -3470,7 +3474,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
                                                dev_warn(priv->device,
                                                         "packet dropped\n");
                                        priv->dev->stats.rx_dropped++;
-                                       break;
+                                       continue;
                                }
 
                                dma_sync_single_for_cpu(priv->device,
@@ -3490,11 +3494,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
                        } else {
                                skb = rx_q->rx_skbuff[entry];
                                if (unlikely(!skb)) {
-                                       netdev_err(priv->dev,
-                                                  "%s: Inconsistent Rx chain\n",
-                                                  priv->dev->name);
+                                       if (net_ratelimit())
+                                               netdev_err(priv->dev,
+                                                          "%s: Inconsistent Rx chain\n",
+                                                          priv->dev->name);
                                        priv->dev->stats.rx_dropped++;
-                                       break;
+                                       continue;
                                }
                                prefetch(skb->data - NET_IP_ALIGN);
                                rx_q->rx_skbuff[entry] = NULL;
@@ -3529,7 +3534,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
                        priv->dev->stats.rx_packets++;
                        priv->dev->stats.rx_bytes += frame_len;
                }
-               entry = next_entry;
        }
 
        stmmac_rx_refill(priv, queue);
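
The rx-loop rework has two parts: the ring index now advances at the top of each iteration (entry = next_entry), and per-descriptor failures continue instead of break, so one oversized or inconsistent descriptor is counted and skipped rather than stalling the whole NAPI poll. The resulting loop shape, reduced to a sketch with illustrative names:

        unsigned int next_entry = ring->cur;

        while (count < limit) {
                unsigned int entry = next_entry;
                struct dma_desc *p = &ring->desc[entry];

                if (desc_owned_by_dma(p))
                        break;                  /* hardware still owns it */

                count++;
                next_entry = (entry + 1) % RING_SIZE;   /* advance first */

                if (desc_has_error(p)) {
                        stats->rx_errors++;
                        continue;               /* skip this slot, keep polling */
                }

                deliver_frame(ring, entry);
        }
        ring->cur = next_entry;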
index 99d86e39ff541c29a239b176ccfcb704c50e4ce8..bf6c1c6779ff37826fa27bf89be1a23bc6d33744 100644 (file)
@@ -995,7 +995,7 @@ static void xlgmac_dev_xmit(struct xlgmac_channel *channel)
        smp_wmb();
 
        ring->cur = cur_index + 1;
-       if (!pkt_info->skb->xmit_more ||
+       if (!netdev_xmit_more() ||
            netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
                                                   channel->queue_index)))
                xlgmac_tx_start_xmit(channel, ring);
index a98aedae1b41d7baa424d6b5a51622ec3af5bee0..c2740dbe91541673e5d0a278c05cd5f8c1491abf 100644 (file)
@@ -140,7 +140,7 @@ static void davinci_mdio_init_clk(struct davinci_mdio_data *data)
 static void davinci_mdio_enable(struct davinci_mdio_data *data)
 {
        /* set enable and clock divider */
-       __raw_writel(data->clk_div | CONTROL_ENABLE, &data->regs->control);
+       writel(data->clk_div | CONTROL_ENABLE, &data->regs->control);
 }
 
 static int davinci_mdio_reset(struct mii_bus *bus)
@@ -159,7 +159,7 @@ static int davinci_mdio_reset(struct mii_bus *bus)
        msleep(PHY_MAX_ADDR * data->access_time);
 
        /* dump hardware version info */
-       ver = __raw_readl(&data->regs->version);
+       ver = readl(&data->regs->version);
        dev_info(data->dev,
                 "davinci mdio revision %d.%d, bus freq %ld\n",
                 (ver >> 8) & 0xff, ver & 0xff,
@@ -169,7 +169,7 @@ static int davinci_mdio_reset(struct mii_bus *bus)
                goto done;
 
        /* get phy mask from the alive register */
-       phy_mask = __raw_readl(&data->regs->alive);
+       phy_mask = readl(&data->regs->alive);
        if (phy_mask) {
                /* restrict mdio bus to live phys only */
                dev_info(data->dev, "detected phy mask %x\n", ~phy_mask);
@@ -196,11 +196,11 @@ static inline int wait_for_user_access(struct davinci_mdio_data *data)
        u32 reg;
 
        while (time_after(timeout, jiffies)) {
-               reg = __raw_readl(&regs->user[0].access);
+               reg = readl(&regs->user[0].access);
                if ((reg & USERACCESS_GO) == 0)
                        return 0;
 
-               reg = __raw_readl(&regs->control);
+               reg = readl(&regs->control);
                if ((reg & CONTROL_IDLE) == 0) {
                        usleep_range(100, 200);
                        continue;
@@ -216,7 +216,7 @@ static inline int wait_for_user_access(struct davinci_mdio_data *data)
                return -EAGAIN;
        }
 
-       reg = __raw_readl(&regs->user[0].access);
+       reg = readl(&regs->user[0].access);
        if ((reg & USERACCESS_GO) == 0)
                return 0;
 
@@ -263,7 +263,7 @@ static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
                if (ret < 0)
                        break;
 
-               __raw_writel(reg, &data->regs->user[0].access);
+               writel(reg, &data->regs->user[0].access);
 
                ret = wait_for_user_access(data);
                if (ret == -EAGAIN)
@@ -271,7 +271,7 @@ static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
                if (ret < 0)
                        break;
 
-               reg = __raw_readl(&data->regs->user[0].access);
+               reg = readl(&data->regs->user[0].access);
                ret = (reg & USERACCESS_ACK) ? (reg & USERACCESS_DATA) : -EIO;
                break;
        }
@@ -307,7 +307,7 @@ static int davinci_mdio_write(struct mii_bus *bus, int phy_id,
                if (ret < 0)
                        break;
 
-               __raw_writel(reg, &data->regs->user[0].access);
+               writel(reg, &data->regs->user[0].access);
 
                ret = wait_for_user_access(data);
                if (ret == -EAGAIN)
@@ -472,9 +472,9 @@ static int davinci_mdio_runtime_suspend(struct device *dev)
        u32 ctrl;
 
        /* shutdown the scan state machine */
-       ctrl = __raw_readl(&data->regs->control);
+       ctrl = readl(&data->regs->control);
        ctrl &= ~CONTROL_ENABLE;
-       __raw_writel(ctrl, &data->regs->control);
+       writel(ctrl, &data->regs->control);
        wait_for_idle(data);
 
        return 0;
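
Replacing __raw_readl()/__raw_writel() with readl()/writel() gives these MMIO accesses the ordering barriers and little-endian handling the generic accessors guarantee. As an aside, the GO-bit wait above could also be written with the generic iopoll helper; a sketch of that alternative (not what the driver does, since it also wants to inspect CONTROL_IDLE between reads):

        #include <linux/iopoll.h>

        /* Poll user[0].access until USERACCESS_GO clears, sleeping ~100 us
         * between reads, giving up after 100 ms.
         */
        u32 reg;
        int ret = readl_poll_timeout(&data->regs->user[0].access, reg,
                                     !(reg & USERACCESS_GO), 100, 100000);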
index b03a417d0073ebc033fc442acfa9b2523366ed94..fc38692da71e0c72da9931a3b189f58179fc66fe 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/skbuff.h>
+#include <linux/ethtool.h>
 #include <linux/io.h>
 #include <linux/slab.h>
 #include <linux/of_address.h>
@@ -1078,6 +1079,27 @@ static bool get_bool(struct platform_device *ofdev, const char *s)
        return (bool)*p;
 }
 
+/**
+ * xemaclite_ethtools_get_drvinfo - Get various Axi Emac Lite driver info
+ * @ndev:       Pointer to net_device structure
+ * @ed:         Pointer to ethtool_drvinfo structure
+ *
+ * This implements the ethtool command for getting the driver information.
+ * Issue "ethtool -i ethX" from the Linux command line to invoke this function.
+ */
+static void xemaclite_ethtools_get_drvinfo(struct net_device *ndev,
+                                          struct ethtool_drvinfo *ed)
+{
+       strlcpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
+}
+
+static const struct ethtool_ops xemaclite_ethtool_ops = {
+       .get_drvinfo    = xemaclite_ethtools_get_drvinfo,
+       .get_link       = ethtool_op_get_link,
+       .get_link_ksettings = phy_ethtool_get_link_ksettings,
+       .set_link_ksettings = phy_ethtool_set_link_ksettings,
+};
+
 static const struct net_device_ops xemaclite_netdev_ops;
 
 /**
@@ -1164,6 +1186,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
        dev_info(dev, "MAC address is now %pM\n", ndev->dev_addr);
 
        ndev->netdev_ops = &xemaclite_netdev_ops;
+       ndev->ethtool_ops = &xemaclite_ethtool_ops;
        ndev->flags &= ~IFF_MULTICAST;
        ndev->watchdog_timeo = TX_TIMEOUT;
 
@@ -1229,12 +1252,29 @@ xemaclite_poll_controller(struct net_device *ndev)
 }
 #endif
 
+/* Ioctl MII Interface */
+static int xemaclite_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+       if (!dev->phydev || !netif_running(dev))
+               return -EINVAL;
+
+       switch (cmd) {
+       case SIOCGMIIPHY:
+       case SIOCGMIIREG:
+       case SIOCSMIIREG:
+               return phy_mii_ioctl(dev->phydev, rq, cmd);
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
 static const struct net_device_ops xemaclite_netdev_ops = {
        .ndo_open               = xemaclite_open,
        .ndo_stop               = xemaclite_close,
        .ndo_start_xmit         = xemaclite_send,
        .ndo_set_mac_address    = xemaclite_set_mac_address,
        .ndo_tx_timeout         = xemaclite_tx_timeout,
+       .ndo_do_ioctl           = xemaclite_ioctl,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = xemaclite_poll_controller,
 #endif
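
With .ndo_do_ioctl routed to phy_mii_ioctl(), the classic MII ioctls become usable on this device. A minimal userspace reader, assuming an interface named eth0 served by this driver (illustrative, error handling trimmed):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

int main(void)
{
        struct ifreq ifr = { 0 };
        struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
        if (fd >= 0 && ioctl(fd, SIOCGMIIPHY, &ifr) == 0) {     /* fills phy_id */
                mii->reg_num = MII_BMSR;                        /* basic status */
                if (ioctl(fd, SIOCGMIIREG, &ifr) == 0)
                        printf("BMSR = 0x%04x\n", mii->val_out);
        }
        if (fd >= 0)
                close(fd);
        return 0;
}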
index e859ae2e42d5a152a567de048e898eeafa99fcb5..49f41b64077bb9877d74ea40bbddffea5fe835d4 100644 (file)
@@ -987,6 +987,7 @@ struct netvsc_device {
 
        wait_queue_head_t wait_drain;
        bool destroy;
+       bool tx_disable; /* if true, do not wake up queue again */
 
        /* Receive buffer allocated by us but manages by NetVSP */
        void *recv_buf;
index 813d195bbd57fed2ef96ea708b637ca8197be458..fdbeb7070d42c7f5f2f140af69e9fb6506edc83c 100644 (file)
@@ -110,6 +110,7 @@ static struct netvsc_device *alloc_net_device(void)
 
        init_waitqueue_head(&net_device->wait_drain);
        net_device->destroy = false;
+       net_device->tx_disable = false;
 
        net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
        net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
@@ -719,7 +720,7 @@ static void netvsc_send_tx_complete(struct net_device *ndev,
        } else {
                struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
 
-               if (netif_tx_queue_stopped(txq) &&
+               if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
                    (hv_get_avail_to_write_percent(&channel->outbound) >
                     RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
                        netif_tx_wake_queue(txq);
@@ -874,7 +875,8 @@ static inline int netvsc_send_pkt(
        } else if (ret == -EAGAIN) {
                netif_tx_stop_queue(txq);
                ndev_ctx->eth_stats.stop_queue++;
-               if (atomic_read(&nvchan->queue_sends) < 1) {
+               if (atomic_read(&nvchan->queue_sends) < 1 &&
+                   !net_device->tx_disable) {
                        netif_tx_wake_queue(txq);
                        ndev_ctx->eth_stats.wake_queue++;
                        ret = -ENOSPC;
@@ -964,7 +966,7 @@ int netvsc_send(struct net_device *ndev,
        /* Keep aggregating only if the stack says more data is coming,
         * we are not doing a mixed-mode send, and we are not flow blocked
         */
-       xmit_more = skb->xmit_more &&
+       xmit_more = netdev_xmit_more() &&
                !packet->cp_partial &&
                !netif_xmit_stopped(netdev_get_tx_queue(ndev, packet->q_idx));
 
index 1a08679f90ceed468c1385ecf1834ffc6682f06e..06393b2151021d4cafa4f7308f4070ad7f741932 100644 (file)
@@ -109,6 +109,15 @@ static void netvsc_set_rx_mode(struct net_device *net)
        rcu_read_unlock();
 }
 
+static void netvsc_tx_enable(struct netvsc_device *nvscdev,
+                            struct net_device *ndev)
+{
+       nvscdev->tx_disable = false;
+       virt_wmb(); /* ensure queue wake up mechanism is on */
+
+       netif_tx_wake_all_queues(ndev);
+}
+
 static int netvsc_open(struct net_device *net)
 {
        struct net_device_context *ndev_ctx = netdev_priv(net);
@@ -129,7 +138,7 @@ static int netvsc_open(struct net_device *net)
        rdev = nvdev->extension;
        if (!rdev->link_state) {
                netif_carrier_on(net);
-               netif_tx_wake_all_queues(net);
+               netvsc_tx_enable(nvdev, net);
        }
 
        if (vf_netdev) {
@@ -184,6 +193,17 @@ static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
        }
 }
 
+static void netvsc_tx_disable(struct netvsc_device *nvscdev,
+                             struct net_device *ndev)
+{
+       if (nvscdev) {
+               nvscdev->tx_disable = true;
+               virt_wmb(); /* ensure txq will not wake up after stop */
+       }
+
+       netif_tx_disable(ndev);
+}
+
 static int netvsc_close(struct net_device *net)
 {
        struct net_device_context *net_device_ctx = netdev_priv(net);
@@ -192,7 +212,7 @@ static int netvsc_close(struct net_device *net)
        struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
        int ret;
 
-       netif_tx_disable(net);
+       netvsc_tx_disable(nvdev, net);
 
        /* No need to close rndis filter if it is removed already */
        if (!nvdev)
@@ -918,7 +938,7 @@ static int netvsc_detach(struct net_device *ndev,
 
        /* If device was up (receiving) then shutdown */
        if (netif_running(ndev)) {
-               netif_tx_disable(ndev);
+               netvsc_tx_disable(nvdev, ndev);
 
                ret = rndis_filter_close(nvdev);
                if (ret) {
@@ -1906,7 +1926,7 @@ static void netvsc_link_change(struct work_struct *w)
                if (rdev->link_state) {
                        rdev->link_state = false;
                        netif_carrier_on(net);
-                       netif_tx_wake_all_queues(net);
+                       netvsc_tx_enable(net_device, net);
                } else {
                        notify = true;
                }
@@ -1916,7 +1936,7 @@ static void netvsc_link_change(struct work_struct *w)
                if (!rdev->link_state) {
                        rdev->link_state = true;
                        netif_carrier_off(net);
-                       netif_tx_stop_all_queues(net);
+                       netvsc_tx_disable(net_device, net);
                }
                kfree(event);
                break;
@@ -1925,7 +1945,7 @@ static void netvsc_link_change(struct work_struct *w)
                if (!rdev->link_state) {
                        rdev->link_state = true;
                        netif_carrier_off(net);
-                       netif_tx_stop_all_queues(net);
+                       netvsc_tx_disable(net_device, net);
                        event->event = RNDIS_STATUS_MEDIA_CONNECT;
                        spin_lock_irqsave(&ndev_ctx->lock, flags);
                        list_add(&event->list, &ndev_ctx->reconfig_events);
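
Taken together, these netvsc hunks close a race where the completion path could re-wake a queue that netvsc_close() or netvsc_detach() had just stopped. The flag protocol has two sides: the control path sets tx_disable before stopping the queues (and clears it before waking them), with virt_wmb() ordering the flag write against the queue operation, and every wake site tests the flag first. Reduced to a sketch (ring_has_room() stands in for the real high-watermark test):

        /* Control path (close/detach): forbid wakes, then stop queues. */
        nvdev->tx_disable = true;
        virt_wmb();                     /* flag visible before queues stop */
        netif_tx_disable(ndev);

        /* Completion path: wake only while still allowed. */
        if (netif_tx_queue_stopped(txq) && !nvdev->tx_disable &&
            ring_has_room(channel))
                netif_tx_wake_queue(txq);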
index 2df7f60fe05220c19896a251b6b15239f4b95112..857e4bf998835eaeb6c4bf776dd77adabb0f36c9 100644 (file)
@@ -128,21 +128,9 @@ static u32 always_on(struct net_device *dev)
        return 1;
 }
 
-static int loopback_get_ts_info(struct net_device *netdev,
-                               struct ethtool_ts_info *ts_info)
-{
-       ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
-                                  SOF_TIMESTAMPING_RX_SOFTWARE |
-                                  SOF_TIMESTAMPING_SOFTWARE;
-
-       ts_info->phc_index = -1;
-
-       return 0;
-};
-
 static const struct ethtool_ops loopback_ethtool_ops = {
        .get_link               = always_on,
-       .get_ts_info            = loopback_get_ts_info,
+       .get_ts_info            = ethtool_op_get_ts_info,
 };
 
 static int loopback_dev_init(struct net_device *dev)
index 947c40f112d1d28f2f6169a7ccdac9699d7000f1..263bfafdb0049ffccbae486b48b14469ceaed3a8 100644 (file)
@@ -2175,8 +2175,9 @@ static int copy_tx_sa_stats(struct sk_buff *skb,
        return 0;
 }
 
-static int copy_rx_sa_stats(struct sk_buff *skb,
-                           struct macsec_rx_sa_stats __percpu *pstats)
+static noinline_for_stack int
+copy_rx_sa_stats(struct sk_buff *skb,
+                struct macsec_rx_sa_stats __percpu *pstats)
 {
        struct macsec_rx_sa_stats sum = {0, };
        int cpu;
@@ -2201,8 +2202,8 @@ static int copy_rx_sa_stats(struct sk_buff *skb,
        return 0;
 }
 
-static int copy_rx_sc_stats(struct sk_buff *skb,
-                           struct pcpu_rx_sc_stats __percpu *pstats)
+static noinline_for_stack int
+copy_rx_sc_stats(struct sk_buff *skb, struct pcpu_rx_sc_stats __percpu *pstats)
 {
        struct macsec_rx_sc_stats sum = {0, };
        int cpu;
@@ -2265,8 +2266,8 @@ static int copy_rx_sc_stats(struct sk_buff *skb,
        return 0;
 }
 
-static int copy_tx_sc_stats(struct sk_buff *skb,
-                           struct pcpu_tx_sc_stats __percpu *pstats)
+static noinline_for_stack int
+copy_tx_sc_stats(struct sk_buff *skb, struct pcpu_tx_sc_stats __percpu *pstats)
 {
        struct macsec_tx_sc_stats sum = {0, };
        int cpu;
@@ -2305,8 +2306,8 @@ static int copy_tx_sc_stats(struct sk_buff *skb,
        return 0;
 }
 
-static int copy_secy_stats(struct sk_buff *skb,
-                          struct pcpu_secy_stats __percpu *pstats)
+static noinline_for_stack int
+copy_secy_stats(struct sk_buff *skb, struct pcpu_secy_stats __percpu *pstats)
 {
        struct macsec_dev_stats sum = {0, };
        int cpu;
@@ -2410,8 +2411,9 @@ static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
        return 1;
 }
 
-static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
-                    struct sk_buff *skb, struct netlink_callback *cb)
+static noinline_for_stack int
+dump_secy(struct macsec_secy *secy, struct net_device *dev,
+         struct sk_buff *skb, struct netlink_callback *cb)
 {
        struct macsec_rx_sc *rx_sc;
        struct macsec_tx_sc *tx_sc = &secy->tx_sc;
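
The noinline_for_stack annotations (the kernel defines this as plain noinline) keep each helper's large on-stack sum structure in its own frame; without them the compiler could inline every copy_*_stats() body into dump_secy() and stack all of those locals at once. The shape being protected, in miniature (big_stats is illustrative):

static noinline_for_stack int sum_stats(struct sk_buff *skb,
                                        struct big_stats __percpu *pstats)
{
        struct big_stats sum = {};      /* large local confined to this frame */

        /* ... accumulate per-CPU counters into sum, then nla_put() it ... */
        return 0;
}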
index 0fee1d06c0848f274baedb51ef30594cf4358332..cdf8611d28119ef176522c10add9588b8a0d642e 100644 (file)
@@ -3,17 +3,13 @@
 obj-$(CONFIG_NETDEVSIM) += netdevsim.o
 
 netdevsim-objs := \
-       netdev.o \
+       netdev.o devlink.o fib.o sdev.o \
 
 ifeq ($(CONFIG_BPF_SYSCALL),y)
 netdevsim-objs += \
        bpf.o
 endif
 
-ifneq ($(CONFIG_NET_DEVLINK),)
-netdevsim-objs += devlink.o fib.o
-endif
-
 ifneq ($(CONFIG_XFRM_OFFLOAD),)
 netdevsim-objs += ipsec.o
 endif
index f92c43453ec67e1408860561d6a9fe221158b7b3..a93aafe87db34586ca580f050d5c4034c7e31361 100644 (file)
@@ -27,7 +27,7 @@
        bpf_verifier_log_write(env, "[netdevsim] " fmt, ##__VA_ARGS__)
 
 struct nsim_bpf_bound_prog {
-       struct netdevsim *ns;
+       struct netdevsim_shared_dev *sdev;
        struct bpf_prog *prog;
        struct dentry *ddir;
        const char *state;
@@ -65,8 +65,8 @@ nsim_bpf_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn)
        struct nsim_bpf_bound_prog *state;
 
        state = env->prog->aux->offload->dev_priv;
-       if (state->ns->bpf_bind_verifier_delay && !insn_idx)
-               msleep(state->ns->bpf_bind_verifier_delay);
+       if (state->sdev->bpf_bind_verifier_delay && !insn_idx)
+               msleep(state->sdev->bpf_bind_verifier_delay);
 
        if (insn_idx == env->prog->len - 1)
                pr_vlog(env, "Hello from netdevsim!\n");
@@ -213,7 +213,8 @@ nsim_xdp_set_prog(struct netdevsim *ns, struct netdev_bpf *bpf,
        return 0;
 }
 
-static int nsim_bpf_create_prog(struct netdevsim *ns, struct bpf_prog *prog)
+static int nsim_bpf_create_prog(struct netdevsim_shared_dev *sdev,
+                               struct bpf_prog *prog)
 {
        struct nsim_bpf_bound_prog *state;
        char name[16];
@@ -222,13 +223,13 @@ static int nsim_bpf_create_prog(struct netdevsim *ns, struct bpf_prog *prog)
        if (!state)
                return -ENOMEM;
 
-       state->ns = ns;
+       state->sdev = sdev;
        state->prog = prog;
        state->state = "verify";
 
        /* Program id is not populated yet when we create the state. */
-       sprintf(name, "%u", ns->sdev->prog_id_gen++);
-       state->ddir = debugfs_create_dir(name, ns->sdev->ddir_bpf_bound_progs);
+       sprintf(name, "%u", sdev->prog_id_gen++);
+       state->ddir = debugfs_create_dir(name, sdev->ddir_bpf_bound_progs);
        if (IS_ERR_OR_NULL(state->ddir)) {
                kfree(state);
                return -ENOMEM;
@@ -239,7 +240,7 @@ static int nsim_bpf_create_prog(struct netdevsim *ns, struct bpf_prog *prog)
                            &state->state, &nsim_bpf_string_fops);
        debugfs_create_bool("loaded", 0400, state->ddir, &state->is_loaded);
 
-       list_add_tail(&state->l, &ns->sdev->bpf_bound_progs);
+       list_add_tail(&state->l, &sdev->bpf_bound_progs);
 
        prog->aux->offload->dev_priv = state;
 
@@ -248,12 +249,13 @@ static int nsim_bpf_create_prog(struct netdevsim *ns, struct bpf_prog *prog)
 
 static int nsim_bpf_verifier_prep(struct bpf_prog *prog)
 {
-       struct netdevsim *ns = bpf_offload_dev_priv(prog->aux->offload->offdev);
+       struct netdevsim_shared_dev *sdev =
+                       bpf_offload_dev_priv(prog->aux->offload->offdev);
 
-       if (!ns->bpf_bind_accept)
+       if (!sdev->bpf_bind_accept)
                return -EOPNOTSUPP;
 
-       return nsim_bpf_create_prog(ns, prog);
+       return nsim_bpf_create_prog(sdev, prog);
 }
 
 static int nsim_bpf_translate(struct bpf_prog *prog)
@@ -576,39 +578,55 @@ int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf)
        }
 }
 
-int nsim_bpf_init(struct netdevsim *ns)
+static int nsim_bpf_sdev_init(struct netdevsim_shared_dev *sdev)
 {
        int err;
 
-       if (ns->sdev->refcnt == 1) {
-               INIT_LIST_HEAD(&ns->sdev->bpf_bound_progs);
-               INIT_LIST_HEAD(&ns->sdev->bpf_bound_maps);
+       INIT_LIST_HEAD(&sdev->bpf_bound_progs);
+       INIT_LIST_HEAD(&sdev->bpf_bound_maps);
 
-               ns->sdev->ddir_bpf_bound_progs =
-                       debugfs_create_dir("bpf_bound_progs", ns->sdev->ddir);
-               if (IS_ERR_OR_NULL(ns->sdev->ddir_bpf_bound_progs))
-                       return -ENOMEM;
+       sdev->ddir_bpf_bound_progs =
+               debugfs_create_dir("bpf_bound_progs", sdev->ddir);
+       if (IS_ERR_OR_NULL(sdev->ddir_bpf_bound_progs))
+               return -ENOMEM;
+
+       sdev->bpf_dev = bpf_offload_dev_create(&nsim_bpf_dev_ops, sdev);
+       err = PTR_ERR_OR_ZERO(sdev->bpf_dev);
+       if (err)
+               return err;
 
-               ns->sdev->bpf_dev = bpf_offload_dev_create(&nsim_bpf_dev_ops,
-                                                          ns);
-               err = PTR_ERR_OR_ZERO(ns->sdev->bpf_dev);
+       sdev->bpf_bind_accept = true;
+       debugfs_create_bool("bpf_bind_accept", 0600, sdev->ddir,
+                           &sdev->bpf_bind_accept);
+       debugfs_create_u32("bpf_bind_verifier_delay", 0600, sdev->ddir,
+                          &sdev->bpf_bind_verifier_delay);
+       return 0;
+}
+
+static void nsim_bpf_sdev_uninit(struct netdevsim_shared_dev *sdev)
+{
+       WARN_ON(!list_empty(&sdev->bpf_bound_progs));
+       WARN_ON(!list_empty(&sdev->bpf_bound_maps));
+       bpf_offload_dev_destroy(sdev->bpf_dev);
+}
+
+int nsim_bpf_init(struct netdevsim *ns)
+{
+       int err;
+
+       if (ns->sdev->refcnt == 1) {
+               err = nsim_bpf_sdev_init(ns->sdev);
                if (err)
                        return err;
        }
 
        err = bpf_offload_dev_netdev_register(ns->sdev->bpf_dev, ns->netdev);
        if (err)
-               goto err_destroy_bdev;
+               goto err_bpf_sdev_uninit;
 
        debugfs_create_u32("bpf_offloaded_id", 0400, ns->ddir,
                           &ns->bpf_offloaded_id);
 
-       ns->bpf_bind_accept = true;
-       debugfs_create_bool("bpf_bind_accept", 0600, ns->ddir,
-                           &ns->bpf_bind_accept);
-       debugfs_create_u32("bpf_bind_verifier_delay", 0600, ns->ddir,
-                          &ns->bpf_bind_verifier_delay);
-
        ns->bpf_tc_accept = true;
        debugfs_create_bool("bpf_tc_accept", 0600, ns->ddir,
                            &ns->bpf_tc_accept);
@@ -627,9 +645,9 @@ int nsim_bpf_init(struct netdevsim *ns)
 
        return 0;
 
-err_destroy_bdev:
+err_bpf_sdev_uninit:
        if (ns->sdev->refcnt == 1)
-               bpf_offload_dev_destroy(ns->sdev->bpf_dev);
+               nsim_bpf_sdev_uninit(ns->sdev);
        return err;
 }
 
@@ -640,9 +658,6 @@ void nsim_bpf_uninit(struct netdevsim *ns)
        WARN_ON(ns->bpf_offloaded);
        bpf_offload_dev_netdev_unregister(ns->sdev->bpf_dev, ns->netdev);
 
-       if (ns->sdev->refcnt == 1) {
-               WARN_ON(!list_empty(&ns->sdev->bpf_bound_progs));
-               WARN_ON(!list_empty(&ns->sdev->bpf_bound_maps));
-               bpf_offload_dev_destroy(ns->sdev->bpf_dev);
-       }
+       if (ns->sdev->refcnt == 1)
+               nsim_bpf_sdev_uninit(ns->sdev);
 }
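
The pivot for this hunk series is the priv pointer stored by bpf_offload_dev_create() and returned by bpf_offload_dev_priv(): the verifier callback only receives the bpf_prog, so the shared dev must be recovered through the offload device. A minimal sketch of that round-trip (the two bpf_offload_dev_* calls are the API used above; the wrapper name is illustrative):

static struct netdevsim_shared_dev *
example_sdev_of_prog(struct bpf_prog *prog)
{
	/* bpf_offload_dev_priv() hands back whatever pointer was given
	 * to bpf_offload_dev_create(); after this patch that is the
	 * shared dev, so bind policy is per switch rather than per port
	 */
	return bpf_offload_dev_priv(prog->aux->offload->offdev);
}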
index 75a50b59cb8f2030de5d2833cc747897ee92f1dd..7805fa8403836e7c4b11182452c0bc0840b9073a 100644 (file)
@@ -25,6 +25,8 @@
 
 #include "netdevsim.h"
 
+static u32 nsim_dev_id;
+
 struct nsim_vf_config {
        int link_state;
        u16 min_tx_rate;
@@ -38,10 +40,7 @@ struct nsim_vf_config {
        bool rss_query_enabled;
 };
 
-static u32 nsim_dev_id;
-
 static struct dentry *nsim_ddir;
-static struct dentry *nsim_sdev_ddir;
 
 static int nsim_num_vf(struct device *dev)
 {
@@ -139,7 +138,6 @@ static void nsim_dev_release(struct device *dev)
        struct netdevsim *ns = to_nsim(dev);
 
        nsim_vfs_disable(ns);
-       free_netdev(ns->netdev);
 }
 
 static struct device_type nsim_dev_type = {
@@ -159,8 +157,8 @@ static int nsim_get_port_parent_id(struct net_device *dev,
 
 static int nsim_init(struct net_device *dev)
 {
-       char sdev_ddir_name[10], sdev_link_name[32];
        struct netdevsim *ns = netdev_priv(dev);
+       char sdev_link_name[32];
        int err;
 
        ns->netdev = dev;
@@ -168,32 +166,13 @@ static int nsim_init(struct net_device *dev)
        if (IS_ERR_OR_NULL(ns->ddir))
                return -ENOMEM;
 
-       if (!ns->sdev) {
-               ns->sdev = kzalloc(sizeof(*ns->sdev), GFP_KERNEL);
-               if (!ns->sdev) {
-                       err = -ENOMEM;
-                       goto err_debugfs_destroy;
-               }
-               ns->sdev->refcnt = 1;
-               ns->sdev->switch_id = nsim_dev_id;
-               sprintf(sdev_ddir_name, "%u", ns->sdev->switch_id);
-               ns->sdev->ddir = debugfs_create_dir(sdev_ddir_name,
-                                                   nsim_sdev_ddir);
-               if (IS_ERR_OR_NULL(ns->sdev->ddir)) {
-                       err = PTR_ERR_OR_ZERO(ns->sdev->ddir) ?: -EINVAL;
-                       goto err_sdev_free;
-               }
-       } else {
-               sprintf(sdev_ddir_name, "%u", ns->sdev->switch_id);
-               ns->sdev->refcnt++;
-       }
-
-       sprintf(sdev_link_name, "../../" DRV_NAME "_sdev/%s", sdev_ddir_name);
+       sprintf(sdev_link_name, "../../" DRV_NAME "_sdev/%u",
+               ns->sdev->switch_id);
        debugfs_create_symlink("sdev", ns->ddir, sdev_link_name);
 
        err = nsim_bpf_init(ns);
        if (err)
-               goto err_sdev_destroy;
+               goto err_debugfs_destroy;
 
        ns->dev.id = nsim_dev_id++;
        ns->dev.bus = &nsim_bus;
@@ -216,12 +195,6 @@ static int nsim_init(struct net_device *dev)
        device_unregister(&ns->dev);
 err_bpf_uninit:
        nsim_bpf_uninit(ns);
-err_sdev_destroy:
-       if (!--ns->sdev->refcnt) {
-               debugfs_remove_recursive(ns->sdev->ddir);
-err_sdev_free:
-               kfree(ns->sdev);
-       }
 err_debugfs_destroy:
        debugfs_remove_recursive(ns->ddir);
        return err;
@@ -235,10 +208,6 @@ static void nsim_uninit(struct net_device *dev)
        nsim_devlink_teardown(ns);
        debugfs_remove_recursive(ns->ddir);
        nsim_bpf_uninit(ns);
-       if (!--ns->sdev->refcnt) {
-               debugfs_remove_recursive(ns->sdev->ddir);
-               kfree(ns->sdev);
-       }
 }
 
 static void nsim_free(struct net_device *dev)
@@ -247,6 +216,7 @@ static void nsim_free(struct net_device *dev)
 
        device_unregister(&ns->dev);
        /* netdev and vf state will be freed out of device_release() */
+       nsim_sdev_put(ns->sdev);
 }
 
 static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -490,6 +460,7 @@ static void nsim_setup(struct net_device *dev)
        eth_hw_addr_random(dev);
 
        dev->netdev_ops = &nsim_netdev_ops;
+       dev->needs_free_netdev = true;
        dev->priv_destructor = nsim_free;
 
        dev->tx_queue_len = 0;
@@ -523,10 +494,11 @@ static int nsim_newlink(struct net *src_net, struct net_device *dev,
                        struct netlink_ext_ack *extack)
 {
        struct netdevsim *ns = netdev_priv(dev);
+       struct netdevsim *joinns = NULL;
+       int err;
 
        if (tb[IFLA_LINK]) {
                struct net_device *joindev;
-               struct netdevsim *joinns;
 
                joindev = __dev_get_by_index(src_net,
                                             nla_get_u32(tb[IFLA_LINK]));
@@ -536,17 +508,20 @@ static int nsim_newlink(struct net *src_net, struct net_device *dev,
                        return -EINVAL;
 
                joinns = netdev_priv(joindev);
-               if (!joinns->sdev || !joinns->sdev->refcnt)
-                       return -EINVAL;
-               ns->sdev = joinns->sdev;
        }
 
-       return register_netdevice(dev);
-}
+       ns->sdev = nsim_sdev_get(joinns);
+       if (IS_ERR(ns->sdev))
+               return PTR_ERR(ns->sdev);
 
-static void nsim_dellink(struct net_device *dev, struct list_head *head)
-{
-       unregister_netdevice_queue(dev, head);
+       err = register_netdevice(dev);
+       if (err)
+               goto err_sdev_put;
+       return 0;
+
+err_sdev_put:
+       nsim_sdev_put(ns->sdev);
+       return err;
 }
 
 static struct rtnl_link_ops nsim_link_ops __read_mostly = {
@@ -555,7 +530,6 @@ static struct rtnl_link_ops nsim_link_ops __read_mostly = {
        .setup          = nsim_setup,
        .validate       = nsim_validate,
        .newlink        = nsim_newlink,
-       .dellink        = nsim_dellink,
 };
 
 static int __init nsim_module_init(void)
@@ -566,15 +540,13 @@ static int __init nsim_module_init(void)
        if (IS_ERR_OR_NULL(nsim_ddir))
                return -ENOMEM;
 
-       nsim_sdev_ddir = debugfs_create_dir(DRV_NAME "_sdev", NULL);
-       if (IS_ERR_OR_NULL(nsim_sdev_ddir)) {
-               err = -ENOMEM;
+       err = nsim_sdev_init();
+       if (err)
                goto err_debugfs_destroy;
-       }
 
        err = bus_register(&nsim_bus);
        if (err)
-               goto err_sdir_destroy;
+               goto err_sdev_exit;
 
        err = nsim_devlink_init();
        if (err)
@@ -590,8 +562,8 @@ static int __init nsim_module_init(void)
        nsim_devlink_exit();
 err_unreg_bus:
        bus_unregister(&nsim_bus);
-err_sdir_destroy:
-       debugfs_remove_recursive(nsim_sdev_ddir);
+err_sdev_exit:
+       nsim_sdev_exit();
 err_debugfs_destroy:
        debugfs_remove_recursive(nsim_ddir);
        return err;
@@ -602,7 +574,7 @@ static void __exit nsim_module_exit(void)
        rtnl_link_unregister(&nsim_link_ops);
        nsim_devlink_exit();
        bus_unregister(&nsim_bus);
-       debugfs_remove_recursive(nsim_sdev_ddir);
+       nsim_sdev_exit();
        debugfs_remove_recursive(nsim_ddir);
 }
 
index 384c254fafc5c0513c54851f3e88c030d3e0847d..2667f9b0e1f9e0dd0e57edb82d0c9997ca84e201 100644 (file)
@@ -39,6 +39,9 @@ struct netdevsim_shared_dev {
 
        struct bpf_offload_dev *bpf_dev;
 
+       bool bpf_bind_accept;
+       u32 bpf_bind_verifier_delay;
+
        struct dentry *ddir_bpf_bound_progs;
        u32 prog_id_gen;
 
@@ -46,6 +49,13 @@ struct netdevsim_shared_dev {
        struct list_head bpf_bound_maps;
 };
 
+struct netdevsim;
+
+struct netdevsim_shared_dev *nsim_sdev_get(struct netdevsim *joinns);
+void nsim_sdev_put(struct netdevsim_shared_dev *sdev);
+int nsim_sdev_init(void);
+void nsim_sdev_exit(void);
+
 #define NSIM_IPSEC_MAX_SA_COUNT                33
 #define NSIM_IPSEC_VALID               BIT(31)
 
@@ -88,18 +98,13 @@ struct netdevsim {
        struct xdp_attachment_info xdp;
        struct xdp_attachment_info xdp_hw;
 
-       bool bpf_bind_accept;
-       u32 bpf_bind_verifier_delay;
-
        bool bpf_tc_accept;
        bool bpf_tc_non_bound_accept;
        bool bpf_xdpdrv_accept;
        bool bpf_xdpoffload_accept;
 
        bool bpf_map_accept;
-#if IS_ENABLED(CONFIG_NET_DEVLINK)
        struct devlink *devlink;
-#endif
        struct nsim_ipsec ipsec;
 };
 
@@ -138,7 +143,6 @@ nsim_bpf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
 }
 #endif
 
-#if IS_ENABLED(CONFIG_NET_DEVLINK)
 enum nsim_resource_id {
        NSIM_RESOURCE_NONE,   /* DEVLINK_RESOURCE_ID_PARENT_TOP */
        NSIM_RESOURCE_IPV4,
@@ -160,25 +164,6 @@ void nsim_fib_exit(void);
 u64 nsim_fib_get_val(struct net *net, enum nsim_resource_id res_id, bool max);
 int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val,
                     struct netlink_ext_ack *extack);
-#else
-static inline int nsim_devlink_setup(struct netdevsim *ns)
-{
-       return 0;
-}
-
-static inline void nsim_devlink_teardown(struct netdevsim *ns)
-{
-}
-
-static inline int nsim_devlink_init(void)
-{
-       return 0;
-}
-
-static inline void nsim_devlink_exit(void)
-{
-}
-#endif
 
 #if IS_ENABLED(CONFIG_XFRM_OFFLOAD)
 void nsim_ipsec_init(struct netdevsim *ns);
diff --git a/drivers/net/netdevsim/sdev.c b/drivers/net/netdevsim/sdev.c
new file mode 100644 (file)
index 0000000..6712da3
--- /dev/null
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Mellanox Technologies. All rights reserved */
+
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "netdevsim.h"
+
+static struct dentry *nsim_sdev_ddir;
+
+static u32 nsim_sdev_id;
+
+struct netdevsim_shared_dev *nsim_sdev_get(struct netdevsim *joinns)
+{
+       struct netdevsim_shared_dev *sdev;
+       char sdev_ddir_name[10];
+       int err;
+
+       if (joinns) {
+               if (WARN_ON(!joinns->sdev))
+                       return ERR_PTR(-EINVAL);
+               sdev = joinns->sdev;
+               sdev->refcnt++;
+               return sdev;
+       }
+
+       sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
+       if (!sdev)
+               return ERR_PTR(-ENOMEM);
+       sdev->refcnt = 1;
+       sdev->switch_id = nsim_sdev_id++;
+
+       sprintf(sdev_ddir_name, "%u", sdev->switch_id);
+       sdev->ddir = debugfs_create_dir(sdev_ddir_name, nsim_sdev_ddir);
+       if (IS_ERR_OR_NULL(sdev->ddir)) {
+               err = PTR_ERR_OR_ZERO(sdev->ddir) ?: -EINVAL;
+               goto err_sdev_free;
+       }
+
+       return sdev;
+
+err_sdev_free:
+       nsim_sdev_id--;
+       kfree(sdev);
+       return ERR_PTR(err);
+}
+
+void nsim_sdev_put(struct netdevsim_shared_dev *sdev)
+{
+       if (--sdev->refcnt)
+               return;
+       debugfs_remove_recursive(sdev->ddir);
+       kfree(sdev);
+}
+
+int nsim_sdev_init(void)
+{
+       nsim_sdev_ddir = debugfs_create_dir(DRV_NAME "_sdev", NULL);
+       if (IS_ERR_OR_NULL(nsim_sdev_ddir))
+               return -ENOMEM;
+       return 0;
+}
+
+void nsim_sdev_exit(void)
+{
+       debugfs_remove_recursive(nsim_sdev_ddir);
+}
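
nsim_sdev_get()/nsim_sdev_put() above implement a plain, non-atomic first-get-allocates / last-put-frees refcount (serialization is left to the caller). A self-contained userspace analogue of the same pattern, with hypothetical names:

#include <stdio.h>
#include <stdlib.h>

struct shared {
	unsigned int refcnt;
};

static struct shared *shared_get(struct shared *join)
{
	struct shared *s;

	if (join) {			/* join an existing shared dev */
		join->refcnt++;
		return join;
	}
	s = calloc(1, sizeof(*s));	/* first user allocates */
	if (!s)
		return NULL;
	s->refcnt = 1;
	return s;
}

static void shared_put(struct shared *s)
{
	if (--s->refcnt)		/* other ports still hold it */
		return;
	free(s);			/* last reference frees */
}

int main(void)
{
	struct shared *a = shared_get(NULL);
	struct shared *b = shared_get(a);

	printf("refcnt after join: %u\n", a->refcnt);	/* prints 2 */
	shared_put(b);
	shared_put(a);
	return 0;
}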
index 1c66e92c717cbac7d0f4dc85260be39aa3ab1e26..d408b2eb29664785a7d72e53ded1e48a6b2e716f 100644 (file)
@@ -76,6 +76,17 @@ config MDIO_BUS_MUX_GPIO
          several child MDIO busses to a parent bus.  Child bus
          selection is under the control of GPIO lines.
 
+config MDIO_BUS_MUX_MESON_G12A
+       tristate "Amlogic G12a based MDIO bus multiplexer"
+       depends on ARCH_MESON || COMPILE_TEST
+       depends on OF_MDIO && HAS_IOMEM && COMMON_CLK
+       select MDIO_BUS_MUX
+       default m if ARCH_MESON
+       help
+         This module provides a driver for the MDIO multiplexer/glue of
+         the Amlogic G12a SoC. The multiplexer connects either the external
+         or the internal MDIO bus to the parent bus.
+
 config MDIO_BUS_MUX_MMIOREG
        tristate "MMIO device-controlled MDIO bus multiplexers"
        depends on OF_MDIO && HAS_IOMEM
index ece5dae67174f87362e7950fcd4171b3c0c63d65..27d7f9f3b0de4c820ce1fdb6f7b1237df3601f5a 100644 (file)
@@ -28,6 +28,7 @@ obj-$(CONFIG_MDIO_BITBANG)    += mdio-bitbang.o
 obj-$(CONFIG_MDIO_BUS_MUX)     += mdio-mux.o
 obj-$(CONFIG_MDIO_BUS_MUX_BCM_IPROC)   += mdio-mux-bcm-iproc.o
 obj-$(CONFIG_MDIO_BUS_MUX_GPIO)        += mdio-mux-gpio.o
+obj-$(CONFIG_MDIO_BUS_MUX_MESON_G12A)  += mdio-mux-meson-g12a.o
 obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
 obj-$(CONFIG_MDIO_BUS_MUX_MULTIPLEXER) += mdio-mux-multiplexer.o
 obj-$(CONFIG_MDIO_CAVIUM)      += mdio-cavium.o
index 65b4b0960b1e03c9a0c047db2e0f6c96811feec6..eef35f8c8d450cf0118202eda126866a80d8ee0e 100644 (file)
@@ -60,7 +60,7 @@ static struct phy_driver am79c_driver[] = { {
        .phy_id         = PHY_ID_AM79C874,
        .name           = "AM79C874",
        .phy_id_mask    = 0xfffffff0,
-       .features       = PHY_BASIC_FEATURES,
+       /* PHY_BASIC_FEATURES */
        .config_init    = am79c_config_init,
        .ack_interrupt  = am79c_ack_interrupt,
        .config_intr    = am79c_config_intr,
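
This is the first of many identical hunks below: the static .features mask is dropped and kept only as a comment, so phylib determines the supported link modes at probe time by reading the PHY itself. A simplified sketch of the fallback as it stands in the phylib of this era (genphy_read_abilities and genphy_c45_pma_read_abilities are real helpers; the wrapper function is illustrative):

static int example_probe_features(struct phy_device *phydev)
{
	if (phydev->drv->features)
		linkmode_copy(phydev->supported, phydev->drv->features);
	else if (phydev->drv->get_features)
		return phydev->drv->get_features(phydev);
	else if (phydev->is_c45)
		return genphy_c45_pma_read_abilities(phydev);
	else
		return genphy_read_abilities(phydev);	/* read BMSR etc. */
	return 0;
}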
index ae6a76d3f2fe7aacf6d5f234584fa981bb552d0d..eed4fe3d871f2c477555f6f4eb8d8be6f5e73c48 100644 (file)
 #define MDIO_AN_RX_VEND_STAT3                  0xe832
 #define MDIO_AN_RX_VEND_STAT3_AFR              BIT(0)
 
+/* MDIO_MMD_C22EXT */
+#define MDIO_C22EXT_STAT_SGMII_RX_GOOD_FRAMES          0xd292
+#define MDIO_C22EXT_STAT_SGMII_RX_BAD_FRAMES           0xd294
+#define MDIO_C22EXT_STAT_SGMII_RX_FALSE_CARRIER                0xd297
+#define MDIO_C22EXT_STAT_SGMII_TX_GOOD_FRAMES          0xd313
+#define MDIO_C22EXT_STAT_SGMII_TX_BAD_FRAMES           0xd315
+#define MDIO_C22EXT_STAT_SGMII_TX_FALSE_CARRIER                0xd317
+#define MDIO_C22EXT_STAT_SGMII_TX_COLLISIONS           0xd318
+#define MDIO_C22EXT_STAT_SGMII_TX_LINE_COLLISIONS      0xd319
+#define MDIO_C22EXT_STAT_SGMII_TX_FRAME_ALIGN_ERR      0xd31a
+#define MDIO_C22EXT_STAT_SGMII_TX_RUNT_FRAMES          0xd31b
+
 /* Vendor specific 1, MDIO_MMD_VEND1 */
 #define VEND1_GLOBAL_FW_ID                     0x0020
 #define VEND1_GLOBAL_FW_ID_MAJOR               GENMASK(15, 8)
 #define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL2     BIT(1)
 #define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3     BIT(0)
 
+struct aqr107_hw_stat {
+       const char *name;
+       int reg;
+       int size;
+};
+
+#define SGMII_STAT(n, r, s) { n, MDIO_C22EXT_STAT_SGMII_ ## r, s }
+static const struct aqr107_hw_stat aqr107_hw_stats[] = {
+       SGMII_STAT("sgmii_rx_good_frames",          RX_GOOD_FRAMES,     26),
+       SGMII_STAT("sgmii_rx_bad_frames",           RX_BAD_FRAMES,      26),
+       SGMII_STAT("sgmii_rx_false_carrier_events", RX_FALSE_CARRIER,    8),
+       SGMII_STAT("sgmii_tx_good_frames",          TX_GOOD_FRAMES,     26),
+       SGMII_STAT("sgmii_tx_bad_frames",           TX_BAD_FRAMES,      26),
+       SGMII_STAT("sgmii_tx_false_carrier_events", TX_FALSE_CARRIER,    8),
+       SGMII_STAT("sgmii_tx_collisions",           TX_COLLISIONS,       8),
+       SGMII_STAT("sgmii_tx_line_collisions",      TX_LINE_COLLISIONS,  8),
+       SGMII_STAT("sgmii_tx_frame_alignment_err",  TX_FRAME_ALIGN_ERR, 16),
+       SGMII_STAT("sgmii_tx_runt_frames",          TX_RUNT_FRAMES,     22),
+};
+#define AQR107_SGMII_STAT_SZ ARRAY_SIZE(aqr107_hw_stats)
+
+struct aqr107_priv {
+       u64 sgmii_stats[AQR107_SGMII_STAT_SZ];
+};
+
+static int aqr107_get_sset_count(struct phy_device *phydev)
+{
+       return AQR107_SGMII_STAT_SZ;
+}
+
+static void aqr107_get_strings(struct phy_device *phydev, u8 *data)
+{
+       int i;
+
+       for (i = 0; i < AQR107_SGMII_STAT_SZ; i++)
+               strscpy(data + i * ETH_GSTRING_LEN, aqr107_hw_stats[i].name,
+                       ETH_GSTRING_LEN);
+}
+
+static u64 aqr107_get_stat(struct phy_device *phydev, int index)
+{
+       const struct aqr107_hw_stat *stat = aqr107_hw_stats + index;
+       int len_l = min(stat->size, 16);
+       int len_h = stat->size - len_l;
+       u64 ret;
+       int val;
+
+       val = phy_read_mmd(phydev, MDIO_MMD_C22EXT, stat->reg);
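+       /* Counters wider than 16 bits span two consecutive registers:
+        * the low word at stat->reg, the remaining high bits at
+        * stat->reg + 1.
+        */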
+       if (val < 0)
+               return U64_MAX;
+
+       ret = val & GENMASK(len_l - 1, 0);
+       if (len_h) {
+               val = phy_read_mmd(phydev, MDIO_MMD_C22EXT, stat->reg + 1);
+               if (val < 0)
+                       return U64_MAX;
+
+               ret += (val & GENMASK(len_h - 1, 0)) << 16;
+       }
+
+       return ret;
+}
+
+static void aqr107_get_stats(struct phy_device *phydev,
+                            struct ethtool_stats *stats, u64 *data)
+{
+       struct aqr107_priv *priv = phydev->priv;
+       u64 val;
+       int i;
+
+       for (i = 0; i < AQR107_SGMII_STAT_SZ; i++) {
+               val = aqr107_get_stat(phydev, i);
+               if (val == U64_MAX)
+                       phydev_err(phydev, "Reading HW statistics failed for %s\n",
+                                  aqr107_hw_stats[i].name);
+               else
+                       priv->sgmii_stats[i] += val;
+
+               data[i] = priv->sgmii_stats[i];
+       }
+}
+
 static int aqr_config_aneg(struct phy_device *phydev)
 {
        bool changed = false;
@@ -478,12 +572,32 @@ static void aqr107_link_change_notify(struct phy_device *phydev)
                phydev_info(phydev, "Aquantia 1000Base-T2 mode active\n");
 }
 
+static int aqr107_suspend(struct phy_device *phydev)
+{
+       return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
+                               MDIO_CTRL1_LPOWER);
+}
+
+static int aqr107_resume(struct phy_device *phydev)
+{
+       return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
+                                 MDIO_CTRL1_LPOWER);
+}
+
+static int aqr107_probe(struct phy_device *phydev)
+{
+       phydev->priv = devm_kzalloc(&phydev->mdio.dev,
+                                   sizeof(struct aqr107_priv), GFP_KERNEL);
+       if (!phydev->priv)
+               return -ENOMEM;
+
+       return aqr_hwmon_probe(phydev);
+}
+
 static struct phy_driver aqr_driver[] = {
 {
        PHY_ID_MATCH_MODEL(PHY_ID_AQ1202),
        .name           = "Aquantia AQ1202",
-       .aneg_done      = genphy_c45_aneg_done,
-       .get_features   = genphy_c45_pma_read_abilities,
        .config_aneg    = aqr_config_aneg,
        .config_intr    = aqr_config_intr,
        .ack_interrupt  = aqr_ack_interrupt,
@@ -492,8 +606,6 @@ static struct phy_driver aqr_driver[] = {
 {
        PHY_ID_MATCH_MODEL(PHY_ID_AQ2104),
        .name           = "Aquantia AQ2104",
-       .aneg_done      = genphy_c45_aneg_done,
-       .get_features   = genphy_c45_pma_read_abilities,
        .config_aneg    = aqr_config_aneg,
        .config_intr    = aqr_config_intr,
        .ack_interrupt  = aqr_ack_interrupt,
@@ -502,8 +614,6 @@ static struct phy_driver aqr_driver[] = {
 {
        PHY_ID_MATCH_MODEL(PHY_ID_AQR105),
        .name           = "Aquantia AQR105",
-       .aneg_done      = genphy_c45_aneg_done,
-       .get_features   = genphy_c45_pma_read_abilities,
        .config_aneg    = aqr_config_aneg,
        .config_intr    = aqr_config_intr,
        .ack_interrupt  = aqr_ack_interrupt,
@@ -512,8 +622,6 @@ static struct phy_driver aqr_driver[] = {
 {
        PHY_ID_MATCH_MODEL(PHY_ID_AQR106),
        .name           = "Aquantia AQR106",
-       .aneg_done      = genphy_c45_aneg_done,
-       .get_features   = genphy_c45_pma_read_abilities,
        .config_aneg    = aqr_config_aneg,
        .config_intr    = aqr_config_intr,
        .ack_interrupt  = aqr_ack_interrupt,
@@ -522,9 +630,7 @@ static struct phy_driver aqr_driver[] = {
 {
        PHY_ID_MATCH_MODEL(PHY_ID_AQR107),
        .name           = "Aquantia AQR107",
-       .aneg_done      = genphy_c45_aneg_done,
-       .get_features   = genphy_c45_pma_read_abilities,
-       .probe          = aqr_hwmon_probe,
+       .probe          = aqr107_probe,
        .config_init    = aqr107_config_init,
        .config_aneg    = aqr_config_aneg,
        .config_intr    = aqr_config_intr,
@@ -532,14 +638,17 @@ static struct phy_driver aqr_driver[] = {
        .read_status    = aqr107_read_status,
        .get_tunable    = aqr107_get_tunable,
        .set_tunable    = aqr107_set_tunable,
+       .suspend        = aqr107_suspend,
+       .resume         = aqr107_resume,
+       .get_sset_count = aqr107_get_sset_count,
+       .get_strings    = aqr107_get_strings,
+       .get_stats      = aqr107_get_stats,
        .link_change_notify = aqr107_link_change_notify,
 },
 {
        PHY_ID_MATCH_MODEL(PHY_ID_AQCS109),
        .name           = "Aquantia AQCS109",
-       .aneg_done      = genphy_c45_aneg_done,
-       .get_features   = genphy_c45_pma_read_abilities,
-       .probe          = aqr_hwmon_probe,
+       .probe          = aqr107_probe,
        .config_init    = aqcs109_config_init,
        .config_aneg    = aqr_config_aneg,
        .config_intr    = aqr_config_intr,
@@ -547,13 +656,16 @@ static struct phy_driver aqr_driver[] = {
        .read_status    = aqr107_read_status,
        .get_tunable    = aqr107_get_tunable,
        .set_tunable    = aqr107_set_tunable,
+       .suspend        = aqr107_suspend,
+       .resume         = aqr107_resume,
+       .get_sset_count = aqr107_get_sset_count,
+       .get_strings    = aqr107_get_strings,
+       .get_stats      = aqr107_get_stats,
        .link_change_notify = aqr107_link_change_notify,
 },
 {
        PHY_ID_MATCH_MODEL(PHY_ID_AQR405),
        .name           = "Aquantia AQR405",
-       .aneg_done      = genphy_c45_aneg_done,
-       .get_features   = genphy_c45_pma_read_abilities,
        .config_aneg    = aqr_config_aneg,
        .config_intr    = aqr_config_intr,
        .ack_interrupt  = aqr_ack_interrupt,
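
aqr107_get_stat() above reassembles counters wider than one 16-bit register: up to 16 low bits come from stat->reg and the remaining high bits from stat->reg + 1. A self-contained userspace check of the same arithmetic, with made-up register values standing in for the two phy_read_mmd() results:

#include <stdint.h>
#include <stdio.h>

static uint64_t combine(uint16_t lo_reg, uint16_t hi_reg, int size)
{
	int len_l = size < 16 ? size : 16;
	int len_h = size - len_l;
	uint64_t val = lo_reg & ((1u << len_l) - 1);	/* low word */

	if (len_h)	/* add the high bits from the next register */
		val += (uint64_t)(hi_reg & ((1u << len_h) - 1)) << 16;
	return val;
}

int main(void)
{
	/* a 26-bit counter 0x2345678 splits into lo 0x5678, hi 0x234 */
	printf("0x%llx\n",
	       (unsigned long long)combine(0x5678, 0x0234, 26));
	return 0;
}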
index f14ba5366b911e7791472f9d1cbabaae8b6fb622..79bf7ef1fcfd6a210c409acb282251ef30cb1341 100644 (file)
@@ -43,7 +43,7 @@ static struct phy_driver asix_driver[] = { {
        .phy_id         = PHY_ID_ASIX_AX88796B,
        .name           = "Asix Electronics AX88796B",
        .phy_id_mask    = 0xfffffff0,
-       .features       = PHY_BASIC_FEATURES,
+       /* PHY_BASIC_FEATURES */
        .soft_reset     = asix_soft_reset,
 } };
 
index f315ab468a0d92f9bb3f386abdb7e0777d5b3629..406111753f7c18673968bf63007d44e237aa7d4a 100644 (file)
@@ -389,7 +389,7 @@ static struct phy_driver at803x_driver[] = {
        .get_wol                = at803x_get_wol,
        .suspend                = at803x_suspend,
        .resume                 = at803x_resume,
-       .features               = PHY_GBIT_FEATURES,
+       /* PHY_GBIT_FEATURES */
        .ack_interrupt          = at803x_ack_interrupt,
        .config_intr            = at803x_config_intr,
 }, {
@@ -404,7 +404,7 @@ static struct phy_driver at803x_driver[] = {
        .get_wol                = at803x_get_wol,
        .suspend                = at803x_suspend,
        .resume                 = at803x_resume,
-       .features               = PHY_BASIC_FEATURES,
+       /* PHY_BASIC_FEATURES */
        .ack_interrupt          = at803x_ack_interrupt,
        .config_intr            = at803x_config_intr,
 }, {
@@ -418,7 +418,7 @@ static struct phy_driver at803x_driver[] = {
        .get_wol                = at803x_get_wol,
        .suspend                = at803x_suspend,
        .resume                 = at803x_resume,
-       .features               = PHY_GBIT_FEATURES,
+       /* PHY_GBIT_FEATURES */
        .aneg_done              = at803x_aneg_done,
        .ack_interrupt          = &at803x_ack_interrupt,
        .config_intr            = &at803x_config_intr,
index 625b7cb7628503b108274ef68ccb6601a6eb39ef..9ccf28b0a04d1aa253d03826dc65f2e4cb77158e 100644 (file)
@@ -254,7 +254,7 @@ static struct phy_driver bcm_cygnus_phy_driver[] = {
        .phy_id        = PHY_ID_BCM_CYGNUS,
        .phy_id_mask   = 0xfffffff0,
        .name          = "Broadcom Cygnus PHY",
-       .features      = PHY_GBIT_FEATURES,
+       /* PHY_GBIT_FEATURES */
        .config_init   = bcm_cygnus_config_init,
        .ack_interrupt = bcm_phy_ack_intr,
        .config_intr   = bcm_phy_config_intr,
@@ -264,7 +264,7 @@ static struct phy_driver bcm_cygnus_phy_driver[] = {
        .phy_id         = PHY_ID_BCM_OMEGA,
        .phy_id_mask    = 0xfffffff0,
        .name           = "Broadcom Omega Combo GPHY",
-       .features       = PHY_GBIT_FEATURES,
+       /* PHY_GBIT_FEATURES */
        .flags          = PHY_IS_INTERNAL,
        .config_init    = bcm_omega_config_init,
        .suspend        = genphy_suspend,
index 44e6cff419a094a7c1334ca15ef36084a61ecfb6..23f1958ba6ad4f6000aa0c5de4d22dca908ef248 100644 (file)
@@ -64,7 +64,7 @@ static struct phy_driver bcm63xx_driver[] = {
        .phy_id         = 0x00406000,
        .phy_id_mask    = 0xfffffc00,
        .name           = "Broadcom BCM63XX (1)",
-       .features       = PHY_BASIC_FEATURES,
+       /* PHY_BASIC_FEATURES */
        .flags          = PHY_IS_INTERNAL,
        .config_init    = bcm63xx_config_init,
        .ack_interrupt  = bcm_phy_ack_intr,
@@ -73,7 +73,7 @@ static struct phy_driver bcm63xx_driver[] = {
        /* same phy as above, with just a different OUI */
        .phy_id         = 0x002bdc00,
        .phy_id_mask    = 0xfffffc00,
-       .features       = PHY_BASIC_FEATURES,
+       /* PHY_BASIC_FEATURES */
        .flags          = PHY_IS_INTERNAL,
        .config_init    = bcm63xx_config_init,
        .ack_interrupt  = bcm_phy_ack_intr,
index a75e1b283541c89a9eec8f037ab228dff29e7373..8fc33867e524f865261b68f8c2f74ab0d8c6d660 100644 (file)
@@ -538,7 +538,7 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
        .phy_id         = (_oui),                                       \
        .phy_id_mask    = 0xfffffff0,                                   \
        .name           = _name,                                        \
-       .features       = PHY_GBIT_FEATURES,                            \
+       /* PHY_GBIT_FEATURES */                                         \
        .flags          = PHY_IS_INTERNAL,                              \
        .config_init    = bcm7xxx_28nm_config_init,                     \
        .resume         = bcm7xxx_28nm_resume,                          \
@@ -555,7 +555,7 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
        .phy_id         = (_oui),                                       \
        .phy_id_mask    = 0xfffffff0,                                   \
        .name           = _name,                                        \
-       .features       = PHY_BASIC_FEATURES,                           \
+       /* PHY_BASIC_FEATURES */                                        \
        .flags          = PHY_IS_INTERNAL,                              \
        .config_init    = bcm7xxx_28nm_ephy_config_init,                \
        .resume         = bcm7xxx_28nm_ephy_resume,                     \
@@ -570,7 +570,7 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
        .phy_id         = (_oui),                                       \
        .phy_id_mask    = 0xfffffff0,                                   \
        .name           = _name,                                        \
-       .features       = PHY_BASIC_FEATURES,                           \
+       /* PHY_BASIC_FEATURES */                                        \
        .flags          = PHY_IS_INTERNAL,                              \
        .config_init    = bcm7xxx_config_init,                          \
        .suspend        = bcm7xxx_suspend,                              \
index cb86a3e90c7de3ff41a7d821c135aec8dad9eef0..67fa05d6752369aa98d15fe059046647bbdebaed 100644 (file)
@@ -610,7 +610,7 @@ static struct phy_driver broadcom_drivers[] = {
        .phy_id         = PHY_ID_BCM5411,
        .phy_id_mask    = 0xfffffff0,
        .name           = "Broadcom BCM5411",
-       .features       = PHY_GBIT_FEATURES,
+       /* PHY_GBIT_FEATURES */
        .config_init    = bcm54xx_config_init,
        .ack_interrupt  = bcm_phy_ack_intr,
        .config_intr    = bcm_phy_config_intr,
@@ -618,7 +618,7 @@ static struct phy_driver broadcom_drivers[] = {
        .phy_id         = PHY_ID_BCM5421,
        .phy_id_mask    = 0xfffffff0,
        .name           = "Broadcom BCM5421",
-       .features       = PHY_GBIT_FEATURES,
+       /* PHY_GBIT_FEATURES */
        .config_init    = bcm54xx_config_init,
        .ack_interrupt  = bcm_phy_ack_intr,
        .config_intr    = bcm_phy_config_intr,
@@ -626,7 +626,7 @@ static struct phy_driver broadcom_drivers[] = {
        .phy_id         = PHY_ID_BCM54210E,
        .phy_id_mask    = 0xfffffff0,
        .name           = "Broadcom BCM54210E",
-       .features       = PHY_GBIT_FEATURES,
+       /* PHY_GBIT_FEATURES */
        .config_init    = bcm54xx_config_init,
        .ack_interrupt  = bcm_phy_ack_intr,
        .config_intr    = bcm_phy_config_intr,
@@ -634,7 +634,7 @@ static struct phy_driver broadcom_drivers[] = {
        .phy_id         = PHY_ID_BCM5461,
        .phy_id_mask    = 0xfffffff0,
        .name           = "Broadcom BCM5461",
-       .features       = PHY_GBIT_FEATURES,
+       /* PHY_GBIT_FEATURES */
        .config_init    = bcm54xx_config_init,
        .ack_interrupt  = bcm_phy_ack_intr,
        .config_intr    = bcm_phy_config_intr,
@@ -642,7 +642,7 @@ static struct phy_driver broadcom_drivers[] = {
        .phy_id         = PHY_ID_BCM54612E,
        .phy_id_mask    = 0xfffffff0,
        .name           = "Broadcom BCM54612E",
-       .features       = PHY_GBIT_FEATURES,
+       /* PHY_GBIT_FEATURES */
        .config_init    = bcm54xx_config_init,
        .ack_interrupt  = bcm_phy_ack_intr,
        .config_intr    = bcm_phy_config_intr,
@@ -650,7 +650,7 @@ static struct phy_driver broadcom_drivers[] = {
        .phy_id         = PHY_ID_BCM54616S,
        .phy_id_mask    = 0xfffffff0,
        .name           = "Broadcom BCM54616S",
-       .features       = PHY_GBIT_FEATURES,
+       /* PHY_GBIT_FEATURES */
        .config_init    = bcm54xx_config_init,
        .config_aneg    = bcm54616s_config_aneg,
        .ack_interrupt  = bcm_phy_ack_intr,
@@ -659,7 +659,7 @@ static struct phy_driver broadcom_drivers[] = {
        .phy_id         = PHY_ID_BCM5464,
        .phy_id_mask    = 0xfffffff0,
        .name           = "Broadcom BCM5464",
-       .features       = PHY_GBIT_FEATURES,
+       /* PHY_GBIT_FEATURES */
        .config_init    = bcm54xx_config_init,
        .ack_interrupt  = bcm_phy_ack_intr,
        .config_intr    = bcm_phy_config_intr,
@@ -667,7 +667,7 @@ static struct phy_driver broadcom_drivers[] = {
        .phy_id         = PHY_ID_BCM5481,
        .phy_id_mask    = 0xfffffff0,
        .name           = "Broadcom BCM5481",
-       .features       = PHY_GBIT_FEATURES,
+       /* PHY_GBIT_FEATURES */
        .config_init    = bcm54xx_config_init,
        .config_aneg    = bcm5481_config_aneg,
        .ack_interrupt  = bcm_phy_ack_intr,
@@ -676,7 +676,7 @@ static struct phy_driver broadcom_drivers[] = {
        .phy_id         = PHY_ID_BCM54810,
        .phy_id_mask    = 0xfffffff0,
        .name           = "Broadcom BCM54810",
-       .features       = PHY_GBIT_FEATURES,
+       /* PHY_GBIT_FEATURES */
        .config_init    = bcm54xx_config_init,
        .config_aneg    = bcm5481_config_aneg,
        .ack_interrupt  = bcm_phy_ack_intr,
@@ -685,7 +685,7 @@ static struct phy_driver broadcom_drivers[] = {
        .phy_id         = PHY_ID_BCM5482,
        .phy_id_mask    = 0xfffffff0,
        .name           = "Broadcom BCM5482",
-       .features       = PHY_GBIT_FEATURES,
+       /* PHY_GBIT_FEATURES */
        .config_init    = bcm5482_config_init,
        .read_status    = bcm5482_read_status,
        .ack_interrupt  = bcm_phy_ack_intr,
@@ -694,7 +694,7 @@ static struct phy_driver broadcom_drivers[] = {
        .phy_id         = PHY_ID_BCM50610,
        .phy_id_mask    = 0xfffffff0,
        .name           = "Broadcom BCM50610",
-       .features       = PHY_GBIT_FEATURES,
+       /* PHY_GBIT_FEATURES */
        .config_init    = bcm54xx_config_init,
        .ack_interrupt  = bcm_phy_ack_intr,
        .config_intr    = bcm_phy_config_intr,
@@ -702,7 +702,7 @@ static struct phy_driver broadcom_drivers[] = {
        .phy_id         = PHY_ID_BCM50610M,
        .phy_id_mask    = 0xfffffff0,
        .name           = "Broadcom BCM50610M",
-       .features       = PHY_GBIT_FEATURES,
+       /* PHY_GBIT_FEATURES */
        .config_init    = bcm54xx_config_init,
        .ack_interrupt  = bcm_phy_ack_intr,
        .config_intr    = bcm_phy_config_intr,
@@ -710,7 +710,7 @@ static struct phy_driver broadcom_drivers[] = {
        .phy_id         = PHY_ID_BCM57780,
        .phy_id_mask    = 0xfffffff0,
        .name           = "Broadcom BCM57780",
-       .features       = PHY_GBIT_FEATURES,
+       /* PHY_GBIT_FEATURES */
        .config_init    = bcm54xx_config_init,
        .ack_interrupt  = bcm_phy_ack_intr,
        .config_intr    = bcm_phy_config_intr,
@@ -718,7 +718,7 @@ static struct phy_driver broadcom_drivers[] = {
        .phy_id         = PHY_ID_BCMAC131,
        .phy_id_mask    = 0xfffffff0,
        .name           = "Broadcom BCMAC131",
-       .features       = PHY_BASIC_FEATURES,
+       /* PHY_BASIC_FEATURES */
        .config_init    = brcm_fet_config_init,
        .ack_interrupt  = brcm_fet_ack_interrupt,
        .config_intr    = brcm_fet_config_intr,
@@ -726,7 +726,7 @@ static struct phy_driver broadcom_drivers[] = {
        .phy_id         = PHY_ID_BCM5241,
        .phy_id_mask    = 0xfffffff0,
        .name           = "Broadcom BCM5241",
-       .features       = PHY_BASIC_FEATURES,
+       /* PHY_BASIC_FEATURES */
        .config_init    = brcm_fet_config_init,
        .ack_interrupt  = brcm_fet_ack_interrupt,
        .config_intr    = brcm_fet_config_intr,
@@ -735,7 +735,7 @@ static struct phy_driver broadcom_drivers[] = {
        .phy_id_mask    = 0xfffffff0,
        .name           = "Broadcom BCM5395",
        .flags          = PHY_IS_INTERNAL,
-       .features       = PHY_GBIT_FEATURES,
+       /* PHY_GBIT_FEATURES */
        .get_sset_count = bcm_phy_get_sset_count,
        .get_strings    = bcm_phy_get_strings,
        .get_stats      = bcm53xx_phy_get_stats,
@@ -744,7 +744,7 @@ static struct phy_driver broadcom_drivers[] = {
        .phy_id         = PHY_ID_BCM89610,
        .phy_id_mask    = 0xfffffff0,
        .name           = "Broadcom BCM89610",
-       .features       = PHY_GBIT_FEATURES,
+       /* PHY_GBIT_FEATURES */
        .config_init    = bcm54xx_config_init,
        .ack_interrupt  = bcm_phy_ack_intr,
        .config_intr    = bcm_phy_config_intr,
index 108ed24f84893654461aa94a48e5da20979ec8aa..9d1612a4d7e6c179159c826f1624ec616f6d7200 100644 (file)
@@ -102,7 +102,7 @@ static struct phy_driver cis820x_driver[] = {
        .phy_id         = 0x000fc410,
        .name           = "Cicada Cis8201",
        .phy_id_mask    = 0x000ffff0,
-       .features       = PHY_GBIT_FEATURES,
+       /* PHY_GBIT_FEATURES */
        .config_init    = &cis820x_config_init,
        .ack_interrupt  = &cis820x_ack_interrupt,
        .config_intr    = &cis820x_config_intr,
@@ -110,7 +110,7 @@ static struct phy_driver cis820x_driver[] = {
        .phy_id         = 0x000fc440,
        .name           = "Cicada Cis8204",
        .phy_id_mask    = 0x000fffc0,
-       .features       = PHY_GBIT_FEATURES,
+       /* PHY_GBIT_FEATURES */
        .config_init    = &cis820x_config_init,
        .ack_interrupt  = &cis820x_ack_interrupt,
        .config_intr    = &cis820x_config_intr,
index bf39baa7f2c8a5937842a176ad82d3e03715b8cb..942f277463a4d4776f3a954eca782a00e71c52be 100644 (file)
@@ -144,7 +144,7 @@ static struct phy_driver dm91xx_driver[] = {
        .phy_id         = 0x0181b880,
        .name           = "Davicom DM9161E",
        .phy_id_mask    = 0x0ffffff0,
-       .features       = PHY_BASIC_FEATURES,
+       /* PHY_BASIC_FEATURES */
        .config_init    = dm9161_config_init,
        .config_aneg    = dm9161_config_aneg,
        .ack_interrupt  = dm9161_ack_interrupt,
@@ -153,7 +153,7 @@ static struct phy_driver dm91xx_driver[] = {
        .phy_id         = 0x0181b8b0,
        .name           = "Davicom DM9161B/C",
        .phy_id_mask    = 0x0ffffff0,
-       .features       = PHY_BASIC_FEATURES,
+       /* PHY_BASIC_FEATURES */
        .config_init    = dm9161_config_init,
        .config_aneg    = dm9161_config_aneg,
        .ack_interrupt  = dm9161_ack_interrupt,
@@ -162,7 +162,7 @@ static struct phy_driver dm91xx_driver[] = {
        .phy_id         = 0x0181b8a0,
        .name           = "Davicom DM9161A",
        .phy_id_mask    = 0x0ffffff0,
-       .features       = PHY_BASIC_FEATURES,
+       /* PHY_BASIC_FEATURES */
        .config_init    = dm9161_config_init,
        .config_aneg    = dm9161_config_aneg,
        .ack_interrupt  = dm9161_ack_interrupt,
@@ -171,7 +171,7 @@ static struct phy_driver dm91xx_driver[] = {
        .phy_id         = 0x00181b80,
        .name           = "Davicom DM9131",
        .phy_id_mask    = 0x0ffffff0,
-       .features       = PHY_BASIC_FEATURES,
+       /* PHY_BASIC_FEATURES */
        .ack_interrupt  = dm9161_ack_interrupt,
        .config_intr    = dm9161_config_intr,
 } };
index 2fe2ebaf62d1b2c6a320efd375e5f4f5f3af142d..6580094161a9f6730e529124a08811f5ace9ff53 100644 (file)
@@ -1514,7 +1514,7 @@ static struct phy_driver dp83640_driver = {
        .phy_id         = DP83640_PHY_ID,
        .phy_id_mask    = 0xfffffff0,
        .name           = "NatSemi DP83640",
-       .features       = PHY_BASIC_FEATURES,
+       /* PHY_BASIC_FEATURES */
        .probe          = dp83640_probe,
        .remove         = dp83640_remove,
        .soft_reset     = dp83640_soft_reset,
index 97d45bd5b38e382b678dc3ce814f813cf045d7d6..7ed4760fb1557ac5ac4c163e416c06532d8e9150 100644 (file)
@@ -310,7 +310,7 @@ static int dp83822_resume(struct phy_device *phydev)
        {                                                       \
                PHY_ID_MATCH_MODEL(_id),                        \
                .name           = (_name),                      \
-               .features       = PHY_BASIC_FEATURES,           \
+               /* PHY_BASIC_FEATURES */                        \
                .soft_reset     = dp83822_phy_reset,            \
                .config_init    = dp83822_config_init,          \
                .get_wol = dp83822_get_wol,                     \
index f55dc907c2f39b579acb9f8042f8692900e5fe47..6f9bc7d91f17fd9597918a6fd1c2434cceb91592 100644 (file)
@@ -99,7 +99,7 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
                .phy_id         = _id,                          \
                .phy_id_mask    = 0xfffffff0,                   \
                .name           = _name,                        \
-               .features       = PHY_BASIC_FEATURES,           \
+               /* PHY_BASIC_FEATURES */                        \
                                                                \
                .soft_reset     = genphy_soft_reset,            \
                .config_init    = _config_init,                 \
index 8448d01819efbde7a973061c945177ad221ffd2c..fd35131a0c39fd714e401725c8942ba239f21e8a 100644 (file)
@@ -315,7 +315,7 @@ static struct phy_driver dp83867_driver[] = {
                .phy_id         = DP83867_PHY_ID,
                .phy_id_mask    = 0xfffffff0,
                .name           = "TI DP83867",
-               .features       = PHY_GBIT_FEATURES,
+               /* PHY_GBIT_FEATURES */
 
                .config_init    = dp83867_config_init,
                .soft_reset     = dp83867_phy_reset,
index e9704af1d239b8650edc56a40f6d12804371116e..ac27da16824dab5b62e6a9b57d883a5d53359252 100644 (file)
@@ -338,7 +338,7 @@ static struct phy_driver dp83811_driver[] = {
                .phy_id = DP83TC811_PHY_ID,
                .phy_id_mask = 0xfffffff0,
                .name = "TI DP83TC811",
-               .features = PHY_BASIC_FEATURES,
+               /* PHY_BASIC_FEATURES */
                .config_init = dp83811_config_init,
                .config_aneg = dp83811_config_aneg,
                .soft_reset = dp83811_phy_reset,
index 2aa367c04a8ea63765fde3bfb9af160402fbb9f7..09e07b902d3a337c3c2aa46f0c056f0eddf6b545 100644 (file)
@@ -86,7 +86,7 @@ static struct phy_driver et1011c_driver[] = { {
        .phy_id         = 0x0282f014,
        .name           = "ET1011C",
        .phy_id_mask    = 0xfffffff0,
-       .features       = PHY_GBIT_FEATURES,
+       /* PHY_GBIT_FEATURES */
        .config_aneg    = et1011c_config_aneg,
        .read_status    = et1011c_read_status,
 } };
index ebef8354bc81347d2524f89f1a93f03dc7747fd2..d6e8516cd146ed9db359570cb6c308810775d8cc 100644 (file)
@@ -311,7 +311,7 @@ static struct phy_driver icplus_driver[] = {
        .phy_id         = 0x02430d80,
        .name           = "ICPlus IP175C",
        .phy_id_mask    = 0x0ffffff0,
-       .features       = PHY_BASIC_FEATURES,
+       /* PHY_BASIC_FEATURES */
        .config_init    = &ip175c_config_init,
        .config_aneg    = &ip175c_config_aneg,
        .read_status    = &ip175c_read_status,
@@ -321,7 +321,7 @@ static struct phy_driver icplus_driver[] = {
        .phy_id         = 0x02430d90,
        .name           = "ICPlus IP1001",
        .phy_id_mask    = 0x0ffffff0,
-       .features       = PHY_GBIT_FEATURES,
+       /* PHY_GBIT_FEATURES */
        .config_init    = &ip1001_config_init,
        .suspend        = genphy_suspend,
        .resume         = genphy_resume,
@@ -329,7 +329,7 @@ static struct phy_driver icplus_driver[] = {
        .phy_id         = 0x02430c54,
        .name           = "ICPlus IP101A/G",
        .phy_id_mask    = 0x0ffffff0,
-       .features       = PHY_BASIC_FEATURES,
+       /* PHY_BASIC_FEATURES */
        .probe          = ip101a_g_probe,
        .config_intr    = ip101a_g_config_intr,
        .did_interrupt  = ip101a_g_did_interrupt,
index 02d9713318b6dfdc8f600281d756e33288636de3..b7875b36097fe246c8627717233ab04d03298b54 100644 (file)
@@ -232,7 +232,7 @@ static struct phy_driver xway_gphy[] = {
                .phy_id         = PHY_ID_PHY11G_1_3,
                .phy_id_mask    = 0xffffffff,
                .name           = "Intel XWAY PHY11G (PEF 7071/PEF 7072) v1.3",
-               .features       = PHY_GBIT_FEATURES,
+               /* PHY_GBIT_FEATURES */
                .config_init    = xway_gphy_config_init,
                .config_aneg    = xway_gphy14_config_aneg,
                .ack_interrupt  = xway_gphy_ack_interrupt,
@@ -244,7 +244,7 @@ static struct phy_driver xway_gphy[] = {
                .phy_id         = PHY_ID_PHY22F_1_3,
                .phy_id_mask    = 0xffffffff,
                .name           = "Intel XWAY PHY22F (PEF 7061) v1.3",
-               .features       = PHY_BASIC_FEATURES,
+               /* PHY_BASIC_FEATURES */
                .config_init    = xway_gphy_config_init,
                .config_aneg    = xway_gphy14_config_aneg,
                .ack_interrupt  = xway_gphy_ack_interrupt,
@@ -256,7 +256,7 @@ static struct phy_driver xway_gphy[] = {
                .phy_id         = PHY_ID_PHY11G_1_4,
                .phy_id_mask    = 0xffffffff,
                .name           = "Intel XWAY PHY11G (PEF 7071/PEF 7072) v1.4",
-               .features       = PHY_GBIT_FEATURES,
+               /* PHY_GBIT_FEATURES */
                .config_init    = xway_gphy_config_init,
                .config_aneg    = xway_gphy14_config_aneg,
                .ack_interrupt  = xway_gphy_ack_interrupt,
@@ -268,7 +268,7 @@ static struct phy_driver xway_gphy[] = {
                .phy_id         = PHY_ID_PHY22F_1_4,
                .phy_id_mask    = 0xffffffff,
                .name           = "Intel XWAY PHY22F (PEF 7061) v1.4",
-               .features       = PHY_BASIC_FEATURES,
+               /* PHY_BASIC_FEATURES */
                .config_init    = xway_gphy_config_init,
                .config_aneg    = xway_gphy14_config_aneg,
                .ack_interrupt  = xway_gphy_ack_interrupt,
@@ -280,7 +280,7 @@ static struct phy_driver xway_gphy[] = {
                .phy_id         = PHY_ID_PHY11G_1_5,
                .phy_id_mask    = 0xffffffff,
                .name           = "Intel XWAY PHY11G (PEF 7071/PEF 7072) v1.5 / v1.6",
-               .features       = PHY_GBIT_FEATURES,
+               /* PHY_GBIT_FEATURES */
                .config_init    = xway_gphy_config_init,
                .ack_interrupt  = xway_gphy_ack_interrupt,
                .did_interrupt  = xway_gphy_did_interrupt,
@@ -291,7 +291,7 @@ static struct phy_driver xway_gphy[] = {
                .phy_id         = PHY_ID_PHY22F_1_5,
                .phy_id_mask    = 0xffffffff,
                .name           = "Intel XWAY PHY22F (PEF 7061) v1.5 / v1.6",
-               .features       = PHY_BASIC_FEATURES,
+               /* PHY_BASIC_FEATURES */
                .config_init    = xway_gphy_config_init,
                .ack_interrupt  = xway_gphy_ack_interrupt,
                .did_interrupt  = xway_gphy_did_interrupt,
@@ -302,7 +302,7 @@ static struct phy_driver xway_gphy[] = {
                .phy_id         = PHY_ID_PHY11G_VR9_1_1,
                .phy_id_mask    = 0xffffffff,
                .name           = "Intel XWAY PHY11G (xRX v1.1 integrated)",
-               .features       = PHY_GBIT_FEATURES,
+               /* PHY_GBIT_FEATURES */
                .config_init    = xway_gphy_config_init,
                .ack_interrupt  = xway_gphy_ack_interrupt,
                .did_interrupt  = xway_gphy_did_interrupt,
@@ -313,7 +313,7 @@ static struct phy_driver xway_gphy[] = {
                .phy_id         = PHY_ID_PHY22F_VR9_1_1,
                .phy_id_mask    = 0xffffffff,
                .name           = "Intel XWAY PHY22F (xRX v1.1 integrated)",
-               .features       = PHY_BASIC_FEATURES,
+               /* PHY_BASIC_FEATURES */
                .config_init    = xway_gphy_config_init,
                .ack_interrupt  = xway_gphy_ack_interrupt,
                .did_interrupt  = xway_gphy_did_interrupt,
@@ -324,7 +324,7 @@ static struct phy_driver xway_gphy[] = {
                .phy_id         = PHY_ID_PHY11G_VR9_1_2,
                .phy_id_mask    = 0xffffffff,
                .name           = "Intel XWAY PHY11G (xRX v1.2 integrated)",
-               .features       = PHY_GBIT_FEATURES,
+               /* PHY_GBIT_FEATURES */
                .config_init    = xway_gphy_config_init,
                .ack_interrupt  = xway_gphy_ack_interrupt,
                .did_interrupt  = xway_gphy_did_interrupt,
@@ -335,7 +335,7 @@ static struct phy_driver xway_gphy[] = {
                .phy_id         = PHY_ID_PHY22F_VR9_1_2,
                .phy_id_mask    = 0xffffffff,
                .name           = "Intel XWAY PHY22F (xRX v1.2 integrated)",
-               .features       = PHY_BASIC_FEATURES,
+               /* PHY_BASIC_FEATURES */
                .config_init    = xway_gphy_config_init,
                .ack_interrupt  = xway_gphy_ack_interrupt,
                .did_interrupt  = xway_gphy_did_interrupt,
index a93d673baf35dc2c4175c8badea698ab690fcdc3..31448628811929f063a379ac254771ea8dbd9f45 100644 (file)
@@ -251,7 +251,7 @@ static struct phy_driver lxt97x_driver[] = {
        .phy_id         = 0x78100000,
        .name           = "LXT970",
        .phy_id_mask    = 0xfffffff0,
-       .features       = PHY_BASIC_FEATURES,
+       /* PHY_BASIC_FEATURES */
        .config_init    = lxt970_config_init,
        .ack_interrupt  = lxt970_ack_interrupt,
        .config_intr    = lxt970_config_intr,
@@ -259,14 +259,14 @@ static struct phy_driver lxt97x_driver[] = {
        .phy_id         = 0x001378e0,
        .name           = "LXT971",
        .phy_id_mask    = 0xfffffff0,
-       .features       = PHY_BASIC_FEATURES,
+       /* PHY_BASIC_FEATURES */
        .ack_interrupt  = lxt971_ack_interrupt,
        .config_intr    = lxt971_config_intr,
 }, {
        .phy_id         = 0x00137a10,
        .name           = "LXT973-A2",
        .phy_id_mask    = 0xffffffff,
-       .features       = PHY_BASIC_FEATURES,
+       /* PHY_BASIC_FEATURES */
        .flags          = 0,
        .probe          = lxt973_probe,
        .config_aneg    = lxt973_config_aneg,
@@ -275,7 +275,7 @@ static struct phy_driver lxt97x_driver[] = {
        .phy_id         = 0x00137a10,
        .name           = "LXT973",
        .phy_id_mask    = 0xfffffff0,
-       .features       = PHY_BASIC_FEATURES,
+       /* PHY_BASIC_FEATURES */
        .flags          = 0,
        .probe          = lxt973_probe,
        .config_aneg    = lxt973_config_aneg,
index 65350186d514ac69da3f27e35152dfc66ff00b2d..8754cb883d021636fe7f5b9dccf72aa6518bd824 100644 (file)
@@ -2126,7 +2126,7 @@ static struct phy_driver marvell_drivers[] = {
                .phy_id = MARVELL_PHY_ID_88E1101,
                .phy_id_mask = MARVELL_PHY_ID_MASK,
                .name = "Marvell 88E1101",
-               .features = PHY_GBIT_FEATURES,
+               /* PHY_GBIT_FEATURES */
                .probe = marvell_probe,
                .config_init = &marvell_config_init,
                .config_aneg = &m88e1101_config_aneg,
@@ -2144,7 +2144,7 @@ static struct phy_driver marvell_drivers[] = {
                .phy_id = MARVELL_PHY_ID_88E1112,
                .phy_id_mask = MARVELL_PHY_ID_MASK,
                .name = "Marvell 88E1112",
-               .features = PHY_GBIT_FEATURES,
+               /* PHY_GBIT_FEATURES */
                .probe = marvell_probe,
                .config_init = &m88e1111_config_init,
                .config_aneg = &marvell_config_aneg,
@@ -2162,7 +2162,7 @@ static struct phy_driver marvell_drivers[] = {
                .phy_id = MARVELL_PHY_ID_88E1111,
                .phy_id_mask = MARVELL_PHY_ID_MASK,
                .name = "Marvell 88E1111",
-               .features = PHY_GBIT_FEATURES,
+               /* PHY_GBIT_FEATURES */
                .probe = marvell_probe,
                .config_init = &m88e1111_config_init,
                .config_aneg = &marvell_config_aneg,
@@ -2181,7 +2181,7 @@ static struct phy_driver marvell_drivers[] = {
                .phy_id = MARVELL_PHY_ID_88E1118,
                .phy_id_mask = MARVELL_PHY_ID_MASK,
                .name = "Marvell 88E1118",
-               .features = PHY_GBIT_FEATURES,
+               /* PHY_GBIT_FEATURES */
                .probe = marvell_probe,
                .config_init = &m88e1118_config_init,
                .config_aneg = &m88e1118_config_aneg,
@@ -2199,7 +2199,7 @@ static struct phy_driver marvell_drivers[] = {
                .phy_id = MARVELL_PHY_ID_88E1121R,
                .phy_id_mask = MARVELL_PHY_ID_MASK,
                .name = "Marvell 88E1121R",
-               .features = PHY_GBIT_FEATURES,
+               /* PHY_GBIT_FEATURES */
                .probe = &m88e1121_probe,
                .config_init = &marvell_config_init,
                .config_aneg = &m88e1121_config_aneg,
@@ -2219,7 +2219,7 @@ static struct phy_driver marvell_drivers[] = {
                .phy_id = MARVELL_PHY_ID_88E1318S,
                .phy_id_mask = MARVELL_PHY_ID_MASK,
                .name = "Marvell 88E1318S",
-               .features = PHY_GBIT_FEATURES,
+               /* PHY_GBIT_FEATURES */
                .probe = marvell_probe,
                .config_init = &m88e1318_config_init,
                .config_aneg = &m88e1318_config_aneg,
@@ -2241,7 +2241,7 @@ static struct phy_driver marvell_drivers[] = {
                .phy_id = MARVELL_PHY_ID_88E1145,
                .phy_id_mask = MARVELL_PHY_ID_MASK,
                .name = "Marvell 88E1145",
-               .features = PHY_GBIT_FEATURES,
+               /* PHY_GBIT_FEATURES */
                .probe = marvell_probe,
                .config_init = &m88e1145_config_init,
                .config_aneg = &m88e1101_config_aneg,
@@ -2260,7 +2260,7 @@ static struct phy_driver marvell_drivers[] = {
                .phy_id = MARVELL_PHY_ID_88E1149R,
                .phy_id_mask = MARVELL_PHY_ID_MASK,
                .name = "Marvell 88E1149R",
-               .features = PHY_GBIT_FEATURES,
+               /* PHY_GBIT_FEATURES */
                .probe = marvell_probe,
                .config_init = &m88e1149_config_init,
                .config_aneg = &m88e1118_config_aneg,
@@ -2278,7 +2278,7 @@ static struct phy_driver marvell_drivers[] = {
                .phy_id = MARVELL_PHY_ID_88E1240,
                .phy_id_mask = MARVELL_PHY_ID_MASK,
                .name = "Marvell 88E1240",
-               .features = PHY_GBIT_FEATURES,
+               /* PHY_GBIT_FEATURES */
                .probe = marvell_probe,
                .config_init = &m88e1111_config_init,
                .config_aneg = &marvell_config_aneg,
@@ -2296,7 +2296,7 @@ static struct phy_driver marvell_drivers[] = {
                .phy_id = MARVELL_PHY_ID_88E1116R,
                .phy_id_mask = MARVELL_PHY_ID_MASK,
                .name = "Marvell 88E1116R",
-               .features = PHY_GBIT_FEATURES,
+               /* PHY_GBIT_FEATURES */
                .probe = marvell_probe,
                .config_init = &m88e1116r_config_init,
                .ack_interrupt = &marvell_ack_interrupt,
@@ -2336,7 +2336,7 @@ static struct phy_driver marvell_drivers[] = {
                .phy_id = MARVELL_PHY_ID_88E1540,
                .phy_id_mask = MARVELL_PHY_ID_MASK,
                .name = "Marvell 88E1540",
-               .features = PHY_GBIT_FEATURES,
+               /* PHY_GBIT_FEATURES */
                .probe = m88e1510_probe,
                .config_init = &marvell_config_init,
                .config_aneg = &m88e1510_config_aneg,
@@ -2359,7 +2359,7 @@ static struct phy_driver marvell_drivers[] = {
                .phy_id_mask = MARVELL_PHY_ID_MASK,
                .name = "Marvell 88E1545",
                .probe = m88e1510_probe,
-               .features = PHY_GBIT_FEATURES,
+               /* PHY_GBIT_FEATURES */
                .config_init = &marvell_config_init,
                .config_aneg = &m88e1510_config_aneg,
                .read_status = &marvell_read_status,
@@ -2378,7 +2378,7 @@ static struct phy_driver marvell_drivers[] = {
                .phy_id = MARVELL_PHY_ID_88E3016,
                .phy_id_mask = MARVELL_PHY_ID_MASK,
                .name = "Marvell 88E3016",
-               .features = PHY_BASIC_FEATURES,
+               /* PHY_BASIC_FEATURES */
                .probe = marvell_probe,
                .config_init = &m88e3016_config_init,
                .aneg_done = &marvell_aneg_done,
@@ -2398,7 +2398,7 @@ static struct phy_driver marvell_drivers[] = {
                .phy_id = MARVELL_PHY_ID_88E6390,
                .phy_id_mask = MARVELL_PHY_ID_MASK,
                .name = "Marvell 88E6390",
-               .features = PHY_GBIT_FEATURES,
+               /* PHY_GBIT_FEATURES */
                .probe = m88e6390_probe,
                .config_init = &marvell_config_init,
                .config_aneg = &m88e6390_config_aneg,
index 100b401b1f4a125343afbf880a1ced60cc033d42..238a20e13d6abfbf9e6cfa61033b6153eff28de5 100644 (file)
@@ -48,6 +48,8 @@ enum {
        MV_AN_STAT1000          = 0x8001, /* 1000base-T status register */
 
        /* Vendor2 MMD registers */
+       MV_V2_PORT_CTRL         = 0xf001,
+       MV_V2_PORT_CTRL_PWRDOWN = 0x0800,
        MV_V2_TEMP_CTRL         = 0xf08a,
        MV_V2_TEMP_CTRL_MASK    = 0xc000,
        MV_V2_TEMP_CTRL_SAMPLE  = 0x0000,
@@ -226,11 +228,19 @@ static int mv3310_probe(struct phy_device *phydev)
 
 static int mv3310_suspend(struct phy_device *phydev)
 {
-       return 0;
+       return phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, MV_V2_PORT_CTRL,
+                               MV_V2_PORT_CTRL_PWRDOWN);
 }
 
 static int mv3310_resume(struct phy_device *phydev)
 {
+       int ret;
+
+       ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, MV_V2_PORT_CTRL,
+                                MV_V2_PORT_CTRL_PWRDOWN);
+       if (ret)
+               return ret;
+
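+       /* re-enable temperature sensing now that the PHY is powered up */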
        return mv3310_hwmon_config(phydev, true);
 }
 
@@ -472,8 +482,9 @@ static struct phy_driver mv3310_drivers[] = {
                .phy_id         = MARVELL_PHY_ID_88E2110,
                .phy_id_mask    = MARVELL_PHY_ID_MASK,
                .name           = "mv88x2110",
-               .get_features   = genphy_c45_pma_read_abilities,
                .probe          = mv3310_probe,
+               .suspend        = mv3310_suspend,
+               .resume         = mv3310_resume,
                .soft_reset     = genphy_no_soft_reset,
                .config_init    = mv3310_config_init,
                .config_aneg    = mv3310_config_aneg,
diff --git a/drivers/net/phy/mdio-mux-meson-g12a.c b/drivers/net/phy/mdio-mux-meson-g12a.c
new file mode 100644 (file)
index 0000000..6fa29ea
--- /dev/null
@@ -0,0 +1,380 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Baylibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/mdio-mux.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+
+#define ETH_PLL_STS            0x40
+#define ETH_PLL_CTL0           0x44
+#define  PLL_CTL0_LOCK_DIG     BIT(30)
+#define  PLL_CTL0_RST          BIT(29)
+#define  PLL_CTL0_EN           BIT(28)
+#define  PLL_CTL0_SEL          BIT(23)
+#define  PLL_CTL0_N            GENMASK(14, 10)
+#define  PLL_CTL0_M            GENMASK(8, 0)
+#define  PLL_LOCK_TIMEOUT      1000000
+#define  PLL_MUX_NUM_PARENT    2
+#define ETH_PLL_CTL1           0x48
+#define ETH_PLL_CTL2           0x4c
+#define ETH_PLL_CTL3           0x50
+#define ETH_PLL_CTL4           0x54
+#define ETH_PLL_CTL5           0x58
+#define ETH_PLL_CTL6           0x5c
+#define ETH_PLL_CTL7           0x60
+
+#define ETH_PHY_CNTL0          0x80
+#define   EPHY_G12A_ID         0x33000180
+#define ETH_PHY_CNTL1          0x84
+#define  PHY_CNTL1_ST_MODE     GENMASK(2, 0)
+#define  PHY_CNTL1_ST_PHYADD   GENMASK(7, 3)
+#define   EPHY_DFLT_ADD                8
+#define  PHY_CNTL1_MII_MODE    GENMASK(15, 14)
+#define   EPHY_MODE_RMII       0x1
+#define  PHY_CNTL1_CLK_EN      BIT(16)
+#define  PHY_CNTL1_CLKFREQ     BIT(17)
+#define  PHY_CNTL1_PHY_ENB     BIT(18)
+#define ETH_PHY_CNTL2          0x88
+#define  PHY_CNTL2_USE_INTERNAL        BIT(5)
+#define  PHY_CNTL2_SMI_SRC_MAC BIT(6)
+#define  PHY_CNTL2_RX_CLK_EPHY BIT(9)
+
+#define MESON_G12A_MDIO_EXTERNAL_ID 0
+#define MESON_G12A_MDIO_INTERNAL_ID 1
+
+struct g12a_mdio_mux {
+       bool pll_is_enabled;
+       void __iomem *regs;
+       void *mux_handle;
+       struct clk *pclk;
+       struct clk *pll;
+};
+
+struct g12a_ephy_pll {
+       void __iomem *base;
+       struct clk_hw hw;
+};
+
+#define g12a_ephy_pll_to_dev(_hw)                      \
+       container_of(_hw, struct g12a_ephy_pll, hw)
+
+static unsigned long g12a_ephy_pll_recalc_rate(struct clk_hw *hw,
+                                              unsigned long parent_rate)
+{
+       struct g12a_ephy_pll *pll = g12a_ephy_pll_to_dev(hw);
+       u32 val, m, n;
+
+       val = readl(pll->base + ETH_PLL_CTL0);
+       m = FIELD_GET(PLL_CTL0_M, val);
+       n = FIELD_GET(PLL_CTL0_N, val);
+
+       return parent_rate * m / n;
+}
+
+static int g12a_ephy_pll_enable(struct clk_hw *hw)
+{
+       struct g12a_ephy_pll *pll = g12a_ephy_pll_to_dev(hw);
+       u32 val = readl(pll->base + ETH_PLL_CTL0);
+
+       /* Apply both enable and reset */
+       val |= PLL_CTL0_RST | PLL_CTL0_EN;
+       writel(val, pll->base + ETH_PLL_CTL0);
+
+       /* Clear the reset to let PLL lock */
+       val &= ~PLL_CTL0_RST;
+       writel(val, pll->base + ETH_PLL_CTL0);
+
+       /* Poll on the digital lock instead of the usual analog lock.
+        * This is done because bit 31 is unreliable on some SoCs: it
+        * may indicate that the PLL is not locked even though the
+        * clock is actually running.
+        */
+       return readl_poll_timeout(pll->base + ETH_PLL_CTL0, val,
+                                 val & PLL_CTL0_LOCK_DIG, 0, PLL_LOCK_TIMEOUT);
+}
+
+static void g12a_ephy_pll_disable(struct clk_hw *hw)
+{
+       struct g12a_ephy_pll *pll = g12a_ephy_pll_to_dev(hw);
+       u32 val;
+
+       val = readl(pll->base + ETH_PLL_CTL0);
+       val &= ~PLL_CTL0_EN;
+       val |= PLL_CTL0_RST;
+       writel(val, pll->base + ETH_PLL_CTL0);
+}
+
+static int g12a_ephy_pll_is_enabled(struct clk_hw *hw)
+{
+       struct g12a_ephy_pll *pll = g12a_ephy_pll_to_dev(hw);
+       unsigned int val;
+
+       val = readl(pll->base + ETH_PLL_CTL0);
+
+       return (val & PLL_CTL0_LOCK_DIG) ? 1 : 0;
+}
+
+static void g12a_ephy_pll_init(struct clk_hw *hw)
+{
+       struct g12a_ephy_pll *pll = g12a_ephy_pll_to_dev(hw);
+
+       /* Apply PLL HW settings */
+       writel(0x29c0040a, pll->base + ETH_PLL_CTL0);
+       writel(0x927e0000, pll->base + ETH_PLL_CTL1);
+       writel(0xac5f49e5, pll->base + ETH_PLL_CTL2);
+       writel(0x00000000, pll->base + ETH_PLL_CTL3);
+       writel(0x00000000, pll->base + ETH_PLL_CTL4);
+       writel(0x20200000, pll->base + ETH_PLL_CTL5);
+       writel(0x0000c002, pll->base + ETH_PLL_CTL6);
+       writel(0x00000023, pll->base + ETH_PLL_CTL7);
+}
+
+static const struct clk_ops g12a_ephy_pll_ops = {
+       .recalc_rate    = g12a_ephy_pll_recalc_rate,
+       .is_enabled     = g12a_ephy_pll_is_enabled,
+       .enable         = g12a_ephy_pll_enable,
+       .disable        = g12a_ephy_pll_disable,
+       .init           = g12a_ephy_pll_init,
+};
+
+static int g12a_enable_internal_mdio(struct g12a_mdio_mux *priv)
+{
+       int ret;
+
+       /* Enable the phy clock */
+       if (!priv->pll_is_enabled) {
+               ret = clk_prepare_enable(priv->pll);
+               if (ret)
+                       return ret;
+       }
+
+       priv->pll_is_enabled = true;
+
+       /* Initialize ephy control */
+       writel(EPHY_G12A_ID, priv->regs + ETH_PHY_CNTL0);
+       writel(FIELD_PREP(PHY_CNTL1_ST_MODE, 3) |
+              FIELD_PREP(PHY_CNTL1_ST_PHYADD, EPHY_DFLT_ADD) |
+              FIELD_PREP(PHY_CNTL1_MII_MODE, EPHY_MODE_RMII) |
+              PHY_CNTL1_CLK_EN |
+              PHY_CNTL1_CLKFREQ |
+              PHY_CNTL1_PHY_ENB,
+              priv->regs + ETH_PHY_CNTL1);
+       writel(PHY_CNTL2_USE_INTERNAL |
+              PHY_CNTL2_SMI_SRC_MAC |
+              PHY_CNTL2_RX_CLK_EPHY,
+              priv->regs + ETH_PHY_CNTL2);
+
+       return 0;
+}
+
+static int g12a_enable_external_mdio(struct g12a_mdio_mux *priv)
+{
+       /* Reset the mdio bus mux */
+       writel_relaxed(0x0, priv->regs + ETH_PHY_CNTL2);
+
+       /* Disable the phy clock if enabled */
+       if (priv->pll_is_enabled) {
+               clk_disable_unprepare(priv->pll);
+               priv->pll_is_enabled = false;
+       }
+
+       return 0;
+}
+
+static int g12a_mdio_switch_fn(int current_child, int desired_child,
+                              void *data)
+{
+       struct g12a_mdio_mux *priv = dev_get_drvdata(data);
+
+       if (current_child == desired_child)
+               return 0;
+
+       switch (desired_child) {
+       case MESON_G12A_MDIO_EXTERNAL_ID:
+               return g12a_enable_external_mdio(priv);
+       case MESON_G12A_MDIO_INTERNAL_ID:
+               return g12a_enable_internal_mdio(priv);
+       default:
+               return -EINVAL;
+       }
+}
+
+static const struct of_device_id g12a_mdio_mux_match[] = {
+       { .compatible = "amlogic,g12a-mdio-mux", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, g12a_mdio_mux_match);
+
+static int g12a_ephy_glue_clk_register(struct device *dev)
+{
+       struct g12a_mdio_mux *priv = dev_get_drvdata(dev);
+       const char *parent_names[PLL_MUX_NUM_PARENT];
+       struct clk_init_data init;
+       struct g12a_ephy_pll *pll;
+       struct clk_mux *mux;
+       struct clk *clk;
+       char *name;
+       int i;
+
+       /* get the mux parents */
+       for (i = 0; i < PLL_MUX_NUM_PARENT; i++) {
+               char in_name[8];
+
+               snprintf(in_name, sizeof(in_name), "clkin%d", i);
+               clk = devm_clk_get(dev, in_name);
+               if (IS_ERR(clk)) {
+                       if (PTR_ERR(clk) != -EPROBE_DEFER)
+                               dev_err(dev, "Missing clock %s\n", in_name);
+                       return PTR_ERR(clk);
+               }
+
+               parent_names[i] = __clk_get_name(clk);
+       }
+
+       /* create the input mux */
+       mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
+       if (!mux)
+               return -ENOMEM;
+
+       name = kasprintf(GFP_KERNEL, "%s#mux", dev_name(dev));
+       if (!name)
+               return -ENOMEM;
+
+       init.name = name;
+       init.ops = &clk_mux_ro_ops;
+       init.flags = 0;
+       init.parent_names = parent_names;
+       init.num_parents = PLL_MUX_NUM_PARENT;
+
+       mux->reg = priv->regs + ETH_PLL_CTL0;
+       mux->shift = __ffs(PLL_CTL0_SEL);
+       mux->mask = PLL_CTL0_SEL >> mux->shift;
+       mux->hw.init = &init;
+
+       clk = devm_clk_register(dev, &mux->hw);
+       kfree(name);
+       if (IS_ERR(clk)) {
+               dev_err(dev, "failed to register input mux\n");
+               return PTR_ERR(clk);
+       }
+
+       /* create the pll */
+       pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL);
+       if (!pll)
+               return -ENOMEM;
+
+       name = kasprintf(GFP_KERNEL, "%s#pll", dev_name(dev));
+       if (!name)
+               return -ENOMEM;
+
+       init.name = name;
+       init.ops = &g12a_ephy_pll_ops;
+       init.flags = 0;
+       parent_names[0] = __clk_get_name(clk);
+       init.parent_names = parent_names;
+       init.num_parents = 1;
+
+       pll->base = priv->regs;
+       pll->hw.init = &init;
+
+       clk = devm_clk_register(dev, &pll->hw);
+       kfree(name);
+       if (IS_ERR(clk)) {
+               dev_err(dev, "failed to register input mux\n");
+               return PTR_ERR(clk);
+       }
+
+       priv->pll = clk;
+
+       return 0;
+}
+
+static int g12a_mdio_mux_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct g12a_mdio_mux *priv;
+       struct resource *res;
+       int ret;
+
+       priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       platform_set_drvdata(pdev, priv);
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       priv->regs = devm_ioremap_resource(dev, res);
+       if (IS_ERR(priv->regs))
+               return PTR_ERR(priv->regs);
+
+       priv->pclk = devm_clk_get(dev, "pclk");
+       if (IS_ERR(priv->pclk)) {
+               ret = PTR_ERR(priv->pclk);
+               if (ret != -EPROBE_DEFER)
+                       dev_err(dev, "failed to get peripheral clock\n");
+               return ret;
+       }
+
+       /* Make sure the device registers are clocked */
+       ret = clk_prepare_enable(priv->pclk);
+       if (ret) {
+               dev_err(dev, "failed to enable peripheral clock");
+               return ret;
+       }
+
+       /* Register PLL in CCF */
+       ret = g12a_ephy_glue_clk_register(dev);
+       if (ret)
+               goto err;
+
+       ret = mdio_mux_init(dev, dev->of_node, g12a_mdio_switch_fn,
+                           &priv->mux_handle, dev, NULL);
+       if (ret) {
+               if (ret != -EPROBE_DEFER)
+                       dev_err(dev, "mdio multiplexer init failed: %d", ret);
+               goto err;
+       }
+
+       return 0;
+
+err:
+       clk_disable_unprepare(priv->pclk);
+       return ret;
+}
+
+static int g12a_mdio_mux_remove(struct platform_device *pdev)
+{
+       struct g12a_mdio_mux *priv = platform_get_drvdata(pdev);
+
+       mdio_mux_uninit(priv->mux_handle);
+
+       if (priv->pll_is_enabled)
+               clk_disable_unprepare(priv->pll);
+
+       clk_disable_unprepare(priv->pclk);
+
+       return 0;
+}
+
+static struct platform_driver g12a_mdio_mux_driver = {
+       .probe          = g12a_mdio_mux_probe,
+       .remove         = g12a_mdio_mux_remove,
+       .driver         = {
+               .name   = "g12a-mdio_mux",
+               .of_match_table = g12a_mdio_mux_match,
+       },
+};
+module_platform_driver(g12a_mdio_mux_driver);
+
+MODULE_DESCRIPTION("Amlogic G12a MDIO multiplexer driver");
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_LICENSE("GPL v2");
index 0eec2913c289b83a77a238aca2da64e558378336..fa80d6dce8ee32a82a078c606bcfdefa9ea97d3e 100644 (file)
@@ -224,24 +224,33 @@ static int meson_gxl_config_intr(struct phy_device *phydev)
 
 static struct phy_driver meson_gxl_phy[] = {
        {
-               .phy_id         = 0x01814400,
-               .phy_id_mask    = 0xfffffff0,
+               PHY_ID_MATCH_EXACT(0x01814400),
                .name           = "Meson GXL Internal PHY",
-               .features       = PHY_BASIC_FEATURES,
+               /* PHY_BASIC_FEATURES */
                .flags          = PHY_IS_INTERNAL,
                .soft_reset     = genphy_soft_reset,
                .config_init    = meson_gxl_config_init,
-               .aneg_done      = genphy_aneg_done,
                .read_status    = meson_gxl_read_status,
                .ack_interrupt  = meson_gxl_ack_interrupt,
                .config_intr    = meson_gxl_config_intr,
                .suspend        = genphy_suspend,
                .resume         = genphy_resume,
+       }, {
+               PHY_ID_MATCH_EXACT(0x01803301),
+               .name           = "Meson G12A Internal PHY",
+               /* PHY_BASIC_FEATURES */
+               .flags          = PHY_IS_INTERNAL,
+               .soft_reset     = genphy_soft_reset,
+               .ack_interrupt  = meson_gxl_ack_interrupt,
+               .config_intr    = meson_gxl_config_intr,
+               .suspend        = genphy_suspend,
+               .resume         = genphy_resume,
        },
 };
 
 static struct mdio_device_id __maybe_unused meson_gxl_tbl[] = {
-       { 0x01814400, 0xfffffff0 },
+       { PHY_ID_MATCH_VENDOR(0x01814400) },
+       { PHY_ID_MATCH_VENDOR(0x01803301) },
        { }
 };
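
The table above moves from open-coded id/mask pairs to the PHY_ID_MATCH_EXACT()/PHY_ID_MATCH_VENDOR() helpers. Assuming their conventional definitions in <linux/phy.h> (exact match masks all 32 bits, vendor match masks bits 31:10), matching reduces to a masked compare:

#include <stdint.h>
#include <stdio.h>

#define MATCH_EXACT_MASK  0xffffffffu	/* GENMASK(31, 0) */
#define MATCH_VENDOR_MASK 0xfffffc00u	/* GENMASK(31, 10) */

static int phy_id_matches(uint32_t id, uint32_t entry, uint32_t mask)
{
	return (id & mask) == (entry & mask);
}

int main(void)
{
	/* the G12A internal PHY id from the table above */
	printf("exact self-match: %d\n",
	       phy_id_matches(0x01803301, 0x01803301, MATCH_EXACT_MASK));
	printf("vendor match on a revision bump: %d\n",
	       phy_id_matches(0x01803302, 0x01803301, MATCH_VENDOR_MASK));
	return 0;
}
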
 
index 352da24f1f337f02e572f1cf8b09e486e7e8f0f9..ddd6b6374d8cffd1ec983f854cc3fb3cbb3c157b 100644 (file)
@@ -738,6 +738,31 @@ static int ksz8873mll_read_status(struct phy_device *phydev)
        return 0;
 }
 
+static int ksz9031_get_features(struct phy_device *phydev)
+{
+       int ret;
+
+       ret = genphy_read_abilities(phydev);
+       if (ret < 0)
+               return ret;
+
+       /* Silicon Errata Sheet (DS80000691D or DS80000692D):
+        * Whenever the device's Asymmetric Pause capability is set to 1,
+        * link-up may fail after a link-up to link-down transition.
+        *
+        * Workaround:
+        * Do not enable the Asymmetric Pause capability bit.
+        */
+       linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->supported);
+
+       /* We force setting the Pause capability as the core will force the
+        * Asymmetric Pause capability to 1 otherwise.
+        */
+       linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported);
+
+       return 0;
+}
+
 static int ksz9031_read_status(struct phy_device *phydev)
 {
        int err;
@@ -908,7 +933,7 @@ static struct phy_driver ksphy_driver[] = {
        .phy_id         = PHY_ID_KS8737,
        .phy_id_mask    = MICREL_PHY_ID_MASK,
        .name           = "Micrel KS8737",
-       .features       = PHY_BASIC_FEATURES,
+       /* PHY_BASIC_FEATURES */
        .driver_data    = &ks8737_type,
        .config_init    = kszphy_config_init,
        .ack_interrupt  = kszphy_ack_interrupt,
@@ -919,7 +944,7 @@ static struct phy_driver ksphy_driver[] = {
        .phy_id         = PHY_ID_KSZ8021,
        .phy_id_mask    = 0x00ffffff,
        .name           = "Micrel KSZ8021 or KSZ8031",
-       .features       = PHY_BASIC_FEATURES,
+       /* PHY_BASIC_FEATURES */
        .driver_data    = &ksz8021_type,
        .probe          = kszphy_probe,
        .config_init    = kszphy_config_init,
@@ -934,7 +959,7 @@ static struct phy_driver ksphy_driver[] = {
        .phy_id         = PHY_ID_KSZ8031,
        .phy_id_mask    = 0x00ffffff,
        .name           = "Micrel KSZ8031",
-       .features       = PHY_BASIC_FEATURES,
+       /* PHY_BASIC_FEATURES */
        .driver_data    = &ksz8021_type,
        .probe          = kszphy_probe,
        .config_init    = kszphy_config_init,
@@ -949,7 +974,7 @@ static struct phy_driver ksphy_driver[] = {
        .phy_id         = PHY_ID_KSZ8041,
        .phy_id_mask    = MICREL_PHY_ID_MASK,
        .name           = "Micrel KSZ8041",
-       .features       = PHY_BASIC_FEATURES,
+       /* PHY_BASIC_FEATURES */
        .driver_data    = &ksz8041_type,
        .probe          = kszphy_probe,
        .config_init    = ksz8041_config_init,
@@ -965,7 +990,7 @@ static struct phy_driver ksphy_driver[] = {
        .phy_id         = PHY_ID_KSZ8041RNLI,
        .phy_id_mask    = MICREL_PHY_ID_MASK,
        .name           = "Micrel KSZ8041RNLI",
-       .features       = PHY_BASIC_FEATURES,
+       /* PHY_BASIC_FEATURES */
        .driver_data    = &ksz8041_type,
        .probe          = kszphy_probe,
        .config_init    = kszphy_config_init,
@@ -980,7 +1005,7 @@ static struct phy_driver ksphy_driver[] = {
        .phy_id         = PHY_ID_KSZ8051,
        .phy_id_mask    = MICREL_PHY_ID_MASK,
        .name           = "Micrel KSZ8051",
-       .features       = PHY_BASIC_FEATURES,
+       /* PHY_BASIC_FEATURES */
        .driver_data    = &ksz8051_type,
        .probe          = kszphy_probe,
        .config_init    = kszphy_config_init,
@@ -995,7 +1020,7 @@ static struct phy_driver ksphy_driver[] = {
        .phy_id         = PHY_ID_KSZ8001,
        .name           = "Micrel KSZ8001 or KS8721",
        .phy_id_mask    = 0x00fffffc,
-       .features       = PHY_BASIC_FEATURES,
+       /* PHY_BASIC_FEATURES */
        .driver_data    = &ksz8041_type,
        .probe          = kszphy_probe,
        .config_init    = kszphy_config_init,
@@ -1010,7 +1035,7 @@ static struct phy_driver ksphy_driver[] = {
        .phy_id         = PHY_ID_KSZ8081,
        .name           = "Micrel KSZ8081 or KSZ8091",
        .phy_id_mask    = MICREL_PHY_ID_MASK,
-       .features       = PHY_BASIC_FEATURES,
+       /* PHY_BASIC_FEATURES */
        .driver_data    = &ksz8081_type,
        .probe          = kszphy_probe,
        .config_init    = kszphy_config_init,
@@ -1025,7 +1050,7 @@ static struct phy_driver ksphy_driver[] = {
        .phy_id         = PHY_ID_KSZ8061,
        .name           = "Micrel KSZ8061",
        .phy_id_mask    = MICREL_PHY_ID_MASK,
-       .features       = PHY_BASIC_FEATURES,
+       /* PHY_BASIC_FEATURES */
        .config_init    = ksz8061_config_init,
        .ack_interrupt  = kszphy_ack_interrupt,
        .config_intr    = kszphy_config_intr,
@@ -1035,7 +1060,7 @@ static struct phy_driver ksphy_driver[] = {
        .phy_id         = PHY_ID_KSZ9021,
        .phy_id_mask    = 0x000ffffe,
        .name           = "Micrel KSZ9021 Gigabit PHY",
-       .features       = PHY_GBIT_FEATURES,
+       /* PHY_GBIT_FEATURES */
        .driver_data    = &ksz9021_type,
        .probe          = kszphy_probe,
        .config_init    = ksz9021_config_init,
@@ -1052,9 +1077,9 @@ static struct phy_driver ksphy_driver[] = {
        .phy_id         = PHY_ID_KSZ9031,
        .phy_id_mask    = MICREL_PHY_ID_MASK,
        .name           = "Micrel KSZ9031 Gigabit PHY",
-       .features       = PHY_GBIT_FEATURES,
        .driver_data    = &ksz9021_type,
        .probe          = kszphy_probe,
+       .get_features   = ksz9031_get_features,
        .config_init    = ksz9031_config_init,
        .soft_reset     = genphy_soft_reset,
        .read_status    = ksz9031_read_status,
@@ -1069,7 +1094,7 @@ static struct phy_driver ksphy_driver[] = {
        .phy_id         = PHY_ID_KSZ9131,
        .phy_id_mask    = MICREL_PHY_ID_MASK,
        .name           = "Microchip KSZ9131 Gigabit PHY",
-       .features       = PHY_GBIT_FEATURES,
+       /* PHY_GBIT_FEATURES */
        .driver_data    = &ksz9021_type,
        .probe          = kszphy_probe,
        .config_init    = ksz9131_config_init,
@@ -1085,7 +1110,7 @@ static struct phy_driver ksphy_driver[] = {
        .phy_id         = PHY_ID_KSZ8873MLL,
        .phy_id_mask    = MICREL_PHY_ID_MASK,
        .name           = "Micrel KSZ8873MLL Switch",
-       .features       = PHY_BASIC_FEATURES,
+       /* PHY_BASIC_FEATURES */
        .config_init    = kszphy_config_init,
        .config_aneg    = ksz8873mll_config_aneg,
        .read_status    = ksz8873mll_read_status,
@@ -1095,7 +1120,7 @@ static struct phy_driver ksphy_driver[] = {
        .phy_id         = PHY_ID_KSZ886X,
        .phy_id_mask    = MICREL_PHY_ID_MASK,
        .name           = "Micrel KSZ886X Switch",
-       .features       = PHY_BASIC_FEATURES,
+       /* PHY_BASIC_FEATURES */
        .config_init    = kszphy_config_init,
        .suspend        = genphy_suspend,
        .resume         = genphy_resume,
@@ -1103,7 +1128,7 @@ static struct phy_driver ksphy_driver[] = {
        .phy_id         = PHY_ID_KSZ8795,
        .phy_id_mask    = MICREL_PHY_ID_MASK,
        .name           = "Micrel KSZ8795",
-       .features       = PHY_BASIC_FEATURES,
+       /* PHY_BASIC_FEATURES */
        .config_init    = kszphy_config_init,
        .config_aneg    = ksz8873mll_config_aneg,
        .read_status    = ksz8873mll_read_status,
@@ -1113,7 +1138,7 @@ static struct phy_driver ksphy_driver[] = {
        .phy_id         = PHY_ID_KSZ9477,
        .phy_id_mask    = MICREL_PHY_ID_MASK,
        .name           = "Microchip KSZ9477",
-       .features       = PHY_GBIT_FEATURES,
+       /* PHY_GBIT_FEATURES */
        .config_init    = kszphy_config_init,
        .suspend        = genphy_suspend,
        .resume         = genphy_resume,
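
The new ksz9031_get_features() above illustrates the intended use of the .get_features hook: run the generic ability scan, then apply device-specific fixups such as the pause erratum. The same pattern in generic form, with a purely hypothetical erratum for illustration:

#include <linux/phy.h>

static int example_get_features(struct phy_device *phydev)
{
	int ret = genphy_read_abilities(phydev);

	if (ret < 0)
		return ret;

	/* hypothetical erratum: half-duplex gigabit is broken, mask it out */
	linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
			   phydev->supported);
	return 0;
}
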
index c6cbb3aa8ae0939002a359988be8b08aff2d7ae9..eb1b3287fe0817a548424ab6e507f3664cf026b4 100644 (file)
@@ -333,7 +333,7 @@ static struct phy_driver microchip_phy_driver[] = {
        .phy_id_mask    = 0xfffffff0,
        .name           = "Microchip LAN88xx",
 
-       .features       = PHY_GBIT_FEATURES,
+       /* PHY_GBIT_FEATURES */
 
        .probe          = lan88xx_probe,
        .remove         = lan88xx_remove,
index db50efb30df58bcacbed107bb1cf7eb2dd83ed21..623313f077d170277322d618b8160850835521a8 100644 (file)
@@ -1882,7 +1882,7 @@ static struct phy_driver vsc85xx_driver[] = {
        .phy_id         = PHY_ID_VSC8530,
        .name           = "Microsemi FE VSC8530",
        .phy_id_mask    = 0xfffffff0,
-       .features       = PHY_BASIC_FEATURES,
+       /* PHY_BASIC_FEATURES */
        .soft_reset     = &genphy_soft_reset,
        .config_init    = &vsc85xx_config_init,
        .config_aneg    = &vsc85xx_config_aneg,
@@ -1907,7 +1907,7 @@ static struct phy_driver vsc85xx_driver[] = {
        .phy_id         = PHY_ID_VSC8531,
        .name           = "Microsemi VSC8531",
        .phy_id_mask    = 0xfffffff0,
-       .features       = PHY_GBIT_FEATURES,
+       /* PHY_GBIT_FEATURES */
        .soft_reset     = &genphy_soft_reset,
        .config_init    = &vsc85xx_config_init,
        .config_aneg    = &vsc85xx_config_aneg,
@@ -1932,7 +1932,7 @@ static struct phy_driver vsc85xx_driver[] = {
        .phy_id         = PHY_ID_VSC8540,
        .name           = "Microsemi FE VSC8540 SyncE",
        .phy_id_mask    = 0xfffffff0,
-       .features       = PHY_BASIC_FEATURES,
+       /* PHY_BASIC_FEATURES */
        .soft_reset     = &genphy_soft_reset,
        .config_init    = &vsc85xx_config_init,
        .config_aneg    = &vsc85xx_config_aneg,
@@ -1957,7 +1957,7 @@ static struct phy_driver vsc85xx_driver[] = {
        .phy_id         = PHY_ID_VSC8541,
        .name           = "Microsemi VSC8541 SyncE",
        .phy_id_mask    = 0xfffffff0,
-       .features       = PHY_GBIT_FEATURES,
+       /* PHY_GBIT_FEATURES */
        .soft_reset     = &genphy_soft_reset,
        .config_init    = &vsc85xx_config_init,
        .config_aneg    = &vsc85xx_config_aneg,
@@ -1982,7 +1982,7 @@ static struct phy_driver vsc85xx_driver[] = {
        .phy_id         = PHY_ID_VSC8574,
        .name           = "Microsemi GE VSC8574 SyncE",
        .phy_id_mask    = 0xfffffff0,
-       .features       = PHY_GBIT_FEATURES,
+       /* PHY_GBIT_FEATURES */
        .soft_reset     = &genphy_soft_reset,
        .config_init    = &vsc8584_config_init,
        .config_aneg    = &vsc85xx_config_aneg,
@@ -2008,7 +2008,7 @@ static struct phy_driver vsc85xx_driver[] = {
        .phy_id         = PHY_ID_VSC8584,
        .name           = "Microsemi GE VSC8584 SyncE",
        .phy_id_mask    = 0xfffffff0,
-       .features       = PHY_GBIT_FEATURES,
+       /* PHY_GBIT_FEATURES */
        .soft_reset     = &genphy_soft_reset,
        .config_init    = &vsc8584_config_init,
        .config_aneg    = &vsc85xx_config_aneg,
index 42282a86b68070ae1d19e8cd371d9cc7ffe57c76..a221dd552c3c649cc2932cebb7490e71629cb62b 100644 (file)
@@ -128,7 +128,7 @@ static struct phy_driver dp83865_driver[] = { {
        .phy_id = DP83865_PHY_ID,
        .phy_id_mask = 0xfffffff0,
        .name = "NatSemi DP83865",
-       .features = PHY_GBIT_FEATURES,
+       /* PHY_GBIT_FEATURES */
        .config_init = ns_config_init,
        .ack_interrupt = ns_ack_interrupt,
        .config_intr = ns_config_intr,
index 9e24d95694242aa88c5940b891da1c01a75a3198..abe13dfe50ad34e136f38a7394f3a1ef4b9edcbf 100644 (file)
@@ -262,12 +262,30 @@ int genphy_c45_read_lpa(struct phy_device *phydev)
 {
        int val;
 
+       val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+       if (val < 0)
+               return val;
+
+       if (!(val & MDIO_AN_STAT1_COMPLETE)) {
+               linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+                                  phydev->lp_advertising);
+               mii_10gbt_stat_mod_linkmode_lpa_t(phydev->lp_advertising, 0);
+               mii_adv_mod_linkmode_adv_t(phydev->lp_advertising, 0);
+               phydev->pause = 0;
+               phydev->asym_pause = 0;
+
+               return 0;
+       }
+
+       linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->lp_advertising,
+                        val & MDIO_AN_STAT1_LPABLE);
+
        /* Read the link partner's base page advertisement */
        val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
        if (val < 0)
                return val;
 
-       mii_lpa_mod_linkmode_lpa_t(phydev->lp_advertising, val);
+       mii_adv_mod_linkmode_adv_t(phydev->lp_advertising, val);
        phydev->pause = val & LPA_PAUSE_CAP ? 1 : 0;
        phydev->asym_pause = val & LPA_PAUSE_ASYM ? 1 : 0;
 
@@ -498,21 +516,10 @@ int gen10g_config_aneg(struct phy_device *phydev)
 }
 EXPORT_SYMBOL_GPL(gen10g_config_aneg);
 
-static int gen10g_read_status(struct phy_device *phydev)
-{
-       /* For now just lie and say it's 10G all the time */
-       phydev->speed = SPEED_10000;
-       phydev->duplex = DUPLEX_FULL;
-
-       return genphy_c45_read_link(phydev);
-}
-
-struct phy_driver genphy_10g_driver = {
+struct phy_driver genphy_c45_driver = {
        .phy_id         = 0xffffffff,
        .phy_id_mask    = 0xffffffff,
-       .name           = "Generic 10G PHY",
+       .name           = "Generic Clause 45 PHY",
        .soft_reset     = genphy_no_soft_reset,
-       .features       = PHY_10GBIT_FEATURES,
-       .config_aneg    = gen10g_config_aneg,
-       .read_status    = gen10g_read_status,
+       .read_status    = genphy_c45_read_status,
 };
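
The genphy_c45_read_lpa() change above adds a guard: the link-partner ability registers carry stale or meaningless data until autonegotiation completes, so previously cached modes are cleared rather than trusted. The guard in isolation, as a sketch:

#include <linux/mdio.h>
#include <linux/phy.h>

static int example_lpa_valid(struct phy_device *phydev)
{
	int stat = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);

	if (stat < 0)
		return stat;

	/* only read MDIO_AN_LPA and friends when this returns 1 */
	return !!(stat & MDIO_AN_STAT1_COMPLETE);
}
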
index 5016cd5fd7c7595c4e7d0818d13337cf59bbab53..12ce671020a5dede2f484558152429f0c0d6e568 100644 (file)
@@ -8,6 +8,11 @@
 
 const char *phy_speed_to_str(int speed)
 {
+       BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 67,
+               "Enum ethtool_link_mode_bit_indices and phylib are out of sync. "
+               "If a speed or mode has been added please update phy_speed_to_str "
+               "and the PHY settings array.\n");
+
        switch (speed) {
        case SPEED_10:
                return "10Mbps";
@@ -35,6 +40,8 @@ const char *phy_speed_to_str(int speed)
                return "56Gbps";
        case SPEED_100000:
                return "100Gbps";
+       case SPEED_200000:
+               return "200Gbps";
        case SPEED_UNKNOWN:
                return "Unknown";
        default:
@@ -58,222 +65,81 @@ EXPORT_SYMBOL_GPL(phy_duplex_to_str);
 /* A mapping of all SUPPORTED settings to speed/duplex.  This table
  * must be grouped by speed and sorted in descending match priority
  * - iow, descending speed. */
+
+#define PHY_SETTING(s, d, b) { .speed = SPEED_ ## s, .duplex = DUPLEX_ ## d, \
+                              .bit = ETHTOOL_LINK_MODE_ ## b ## _BIT}
+
 static const struct phy_setting settings[] = {
+       /* 200G */
+       PHY_SETTING( 200000, FULL, 200000baseCR4_Full           ),
+       PHY_SETTING( 200000, FULL, 200000baseKR4_Full           ),
+       PHY_SETTING( 200000, FULL, 200000baseLR4_ER4_FR4_Full   ),
+       PHY_SETTING( 200000, FULL, 200000baseDR4_Full           ),
+       PHY_SETTING( 200000, FULL, 200000baseSR4_Full           ),
        /* 100G */
-       {
-               .speed = SPEED_100000,
-               .duplex = DUPLEX_FULL,
-               .bit = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
-       },
-       {
-               .speed = SPEED_100000,
-               .duplex = DUPLEX_FULL,
-               .bit = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
-       },
-       {
-               .speed = SPEED_100000,
-               .duplex = DUPLEX_FULL,
-               .bit = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
-       },
-       {
-               .speed = SPEED_100000,
-               .duplex = DUPLEX_FULL,
-               .bit = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
-       },
+       PHY_SETTING( 100000, FULL, 100000baseCR4_Full           ),
+       PHY_SETTING( 100000, FULL, 100000baseKR4_Full           ),
+       PHY_SETTING( 100000, FULL, 100000baseLR4_ER4_Full       ),
+       PHY_SETTING( 100000, FULL, 100000baseSR4_Full           ),
+       PHY_SETTING( 100000, FULL, 100000baseCR2_Full           ),
+       PHY_SETTING( 100000, FULL, 100000baseKR2_Full           ),
+       PHY_SETTING( 100000, FULL, 100000baseLR2_ER2_FR2_Full   ),
+       PHY_SETTING( 100000, FULL, 100000baseDR2_Full           ),
+       PHY_SETTING( 100000, FULL, 100000baseSR2_Full           ),
        /* 56G */
-       {
-               .speed = SPEED_56000,
-               .duplex = DUPLEX_FULL,
-               .bit = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
-       },
-       {
-               .speed = SPEED_56000,
-               .duplex = DUPLEX_FULL,
-               .bit = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
-       },
-       {
-               .speed = SPEED_56000,
-               .duplex = DUPLEX_FULL,
-               .bit = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
-       },
-       {
-               .speed = SPEED_56000,
-               .duplex = DUPLEX_FULL,
-               .bit = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
-       },
+       PHY_SETTING(  56000, FULL,  56000baseCR4_Full           ),
+       PHY_SETTING(  56000, FULL,  56000baseKR4_Full           ),
+       PHY_SETTING(  56000, FULL,  56000baseLR4_Full           ),
+       PHY_SETTING(  56000, FULL,  56000baseSR4_Full           ),
        /* 50G */
-       {
-               .speed = SPEED_50000,
-               .duplex = DUPLEX_FULL,
-               .bit = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
-       },
-       {
-               .speed = SPEED_50000,
-               .duplex = DUPLEX_FULL,
-               .bit = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
-       },
-       {
-               .speed = SPEED_50000,
-               .duplex = DUPLEX_FULL,
-               .bit = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
-       },
+       PHY_SETTING(  50000, FULL,  50000baseCR2_Full           ),
+       PHY_SETTING(  50000, FULL,  50000baseKR2_Full           ),
+       PHY_SETTING(  50000, FULL,  50000baseSR2_Full           ),
+       PHY_SETTING(  50000, FULL,  50000baseCR_Full            ),
+       PHY_SETTING(  50000, FULL,  50000baseKR_Full            ),
+       PHY_SETTING(  50000, FULL,  50000baseLR_ER_FR_Full      ),
+       PHY_SETTING(  50000, FULL,  50000baseDR_Full            ),
+       PHY_SETTING(  50000, FULL,  50000baseSR_Full            ),
        /* 40G */
-       {
-               .speed = SPEED_40000,
-               .duplex = DUPLEX_FULL,
-               .bit = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
-       },
-       {
-               .speed = SPEED_40000,
-               .duplex = DUPLEX_FULL,
-               .bit = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
-       },
-       {
-               .speed = SPEED_40000,
-               .duplex = DUPLEX_FULL,
-               .bit = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
-       },
-       {
-               .speed = SPEED_40000,
-               .duplex = DUPLEX_FULL,
-               .bit = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
-       },
+       PHY_SETTING(  40000, FULL,  40000baseCR4_Full           ),
+       PHY_SETTING(  40000, FULL,  40000baseKR4_Full           ),
+       PHY_SETTING(  40000, FULL,  40000baseLR4_Full           ),
+       PHY_SETTING(  40000, FULL,  40000baseSR4_Full           ),
        /* 25G */
-       {
-               .speed = SPEED_25000,
-               .duplex = DUPLEX_FULL,
-               .bit = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
-       },
-       {
-               .speed = SPEED_25000,
-               .duplex = DUPLEX_FULL,
-               .bit = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
-       },
-       {
-               .speed = SPEED_25000,
-               .duplex = DUPLEX_FULL,
-               .bit = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
-       },
-
+       PHY_SETTING(  25000, FULL,  25000baseCR_Full            ),
+       PHY_SETTING(  25000, FULL,  25000baseKR_Full            ),
+       PHY_SETTING(  25000, FULL,  25000baseSR_Full            ),
        /* 20G */
-       {
-               .speed = SPEED_20000,
-               .duplex = DUPLEX_FULL,
-               .bit = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
-       },
-       {
-               .speed = SPEED_20000,
-               .duplex = DUPLEX_FULL,
-               .bit = ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT,
-       },
+       PHY_SETTING(  20000, FULL,  20000baseKR2_Full           ),
+       PHY_SETTING(  20000, FULL,  20000baseMLD2_Full          ),
        /* 10G */
-       {
-               .speed = SPEED_10000,
-               .duplex = DUPLEX_FULL,
-               .bit = ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
-       },
-       {
-               .speed = SPEED_10000,
-               .duplex = DUPLEX_FULL,
-               .bit = ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
-       },
-       {
-               .speed = SPEED_10000,
-               .duplex = DUPLEX_FULL,
-               .bit = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
-       },
-       {
-               .speed = SPEED_10000,
-               .duplex = DUPLEX_FULL,
-               .bit = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
-       },
-       {
-               .speed = SPEED_10000,
-               .duplex = DUPLEX_FULL,
-               .bit = ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
-       },
-       {
-               .speed = SPEED_10000,
-               .duplex = DUPLEX_FULL,
-               .bit = ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
-       },
-       {
-               .speed = SPEED_10000,
-               .duplex = DUPLEX_FULL,
-               .bit = ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
-       },
-       {
-               .speed = SPEED_10000,
-               .duplex = DUPLEX_FULL,
-               .bit = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
-       },
-       {
-               .speed = SPEED_10000,
-               .duplex = DUPLEX_FULL,
-               .bit = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
-       },
+       PHY_SETTING(  10000, FULL,  10000baseCR_Full            ),
+       PHY_SETTING(  10000, FULL,  10000baseER_Full            ),
+       PHY_SETTING(  10000, FULL,  10000baseKR_Full            ),
+       PHY_SETTING(  10000, FULL,  10000baseKX4_Full           ),
+       PHY_SETTING(  10000, FULL,  10000baseLR_Full            ),
+       PHY_SETTING(  10000, FULL,  10000baseLRM_Full           ),
+       PHY_SETTING(  10000, FULL,  10000baseR_FEC              ),
+       PHY_SETTING(  10000, FULL,  10000baseSR_Full            ),
+       PHY_SETTING(  10000, FULL,  10000baseT_Full             ),
        /* 5G */
-       {
-               .speed = SPEED_5000,
-               .duplex = DUPLEX_FULL,
-               .bit = ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
-       },
-
+       PHY_SETTING(   5000, FULL,   5000baseT_Full             ),
        /* 2.5G */
-       {
-               .speed = SPEED_2500,
-               .duplex = DUPLEX_FULL,
-               .bit = ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
-       },
-       {
-               .speed = SPEED_2500,
-               .duplex = DUPLEX_FULL,
-               .bit = ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
-       },
+       PHY_SETTING(   2500, FULL,   2500baseT_Full             ),
+       PHY_SETTING(   2500, FULL,   2500baseX_Full             ),
        /* 1G */
-       {
-               .speed = SPEED_1000,
-               .duplex = DUPLEX_FULL,
-               .bit = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
-       },
-       {
-               .speed = SPEED_1000,
-               .duplex = DUPLEX_FULL,
-               .bit = ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
-       },
-       {
-               .speed = SPEED_1000,
-               .duplex = DUPLEX_HALF,
-               .bit = ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
-       },
-       {
-               .speed = SPEED_1000,
-               .duplex = DUPLEX_FULL,
-               .bit = ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
-       },
+       PHY_SETTING(   1000, FULL,   1000baseKX_Full            ),
+       PHY_SETTING(   1000, FULL,   1000baseT_Full             ),
+       PHY_SETTING(   1000, HALF,   1000baseT_Half             ),
+       PHY_SETTING(   1000, FULL,   1000baseX_Full             ),
        /* 100M */
-       {
-               .speed = SPEED_100,
-               .duplex = DUPLEX_FULL,
-               .bit = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
-       },
-       {
-               .speed = SPEED_100,
-               .duplex = DUPLEX_HALF,
-               .bit = ETHTOOL_LINK_MODE_100baseT_Half_BIT,
-       },
+       PHY_SETTING(    100, FULL,    100baseT_Full             ),
+       PHY_SETTING(    100, HALF,    100baseT_Half             ),
        /* 10M */
-       {
-               .speed = SPEED_10,
-               .duplex = DUPLEX_FULL,
-               .bit = ETHTOOL_LINK_MODE_10baseT_Full_BIT,
-       },
-       {
-               .speed = SPEED_10,
-               .duplex = DUPLEX_HALF,
-               .bit = ETHTOOL_LINK_MODE_10baseT_Half_BIT,
-       },
+       PHY_SETTING(     10, FULL,     10baseT_Full             ),
+       PHY_SETTING(     10, HALF,     10baseT_Half             ),
 };
+#undef PHY_SETTING
 
 /**
  * phy_lookup_setting - lookup a PHY setting
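
The settings[] rewrite above compresses each entry with token pasting on the SPEED_, DUPLEX_ and ETHTOOL_LINK_MODE_ name families. How one row expands, as a standalone compile check with stand-in constants (ETHTOOL_LINK_MODE_1000baseT_Full_BIT really is 5 in the uapi ethtool header; the others are placeholders):

#include <stdio.h>

#define SPEED_1000 1000
#define DUPLEX_FULL 1
#define ETHTOOL_LINK_MODE_1000baseT_Full_BIT 5

struct phy_setting { unsigned int speed, duplex, bit; };

#define PHY_SETTING(s, d, b) { .speed = SPEED_ ## s, .duplex = DUPLEX_ ## d, \
			       .bit = ETHTOOL_LINK_MODE_ ## b ## _BIT }

static const struct phy_setting one = PHY_SETTING(1000, FULL, 1000baseT_Full);

int main(void)
{
	/* prints speed=1000 duplex=1 bit=5 */
	printf("speed=%u duplex=%u bit=%u\n", one.speed, one.duplex, one.bit);
	return 0;
}
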
index 77068c545de0d33607981e7a94a32bf7ed1ff34c..48adb3d1f1ee3f24e4e939f6ee62604d2dd92c38 100644 (file)
@@ -225,7 +225,7 @@ static void phy_mdio_device_remove(struct mdio_device *mdiodev)
 }
 
 static struct phy_driver genphy_driver;
-extern struct phy_driver genphy_10g_driver;
+extern struct phy_driver genphy_c45_driver;
 
 static LIST_HEAD(phy_fixup_list);
 static DEFINE_MUTEX(phy_fixup_lock);
@@ -1174,7 +1174,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
         */
        if (!d->driver) {
                if (phydev->is_c45)
-                       d->driver = &genphy_10g_driver.mdiodrv.driver;
+                       d->driver = &genphy_c45_driver.mdiodrv.driver;
                else
                        d->driver = &genphy_driver.mdiodrv.driver;
 
@@ -1335,7 +1335,7 @@ EXPORT_SYMBOL_GPL(phy_driver_is_genphy);
 bool phy_driver_is_genphy_10g(struct phy_device *phydev)
 {
        return phy_driver_is_genphy_kind(phydev,
-                                        &genphy_10g_driver.mdiodrv.driver);
+                                        &genphy_c45_driver.mdiodrv.driver);
 }
 EXPORT_SYMBOL_GPL(phy_driver_is_genphy_10g);
 
@@ -1710,23 +1710,19 @@ int genphy_update_link(struct phy_device *phydev)
         */
        if (!phy_polling_mode(phydev)) {
                status = phy_read(phydev, MII_BMSR);
-               if (status < 0) {
+               if (status < 0)
                        return status;
-               } else if (status & BMSR_LSTATUS) {
-                       phydev->link = 1;
-                       return 0;
-               }
+               else if (status & BMSR_LSTATUS)
+                       goto done;
        }
 
        /* Read link and autonegotiation status */
        status = phy_read(phydev, MII_BMSR);
        if (status < 0)
                return status;
-
-       if ((status & BMSR_LSTATUS) == 0)
-               phydev->link = 0;
-       else
-               phydev->link = 1;
+done:
+       phydev->link = status & BMSR_LSTATUS ? 1 : 0;
+       phydev->autoneg_complete = status & BMSR_ANEGCOMPLETE ? 1 : 0;
 
        return 0;
 }
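
genphy_update_link() above now also caches BMSR_ANEGCOMPLETE, and it keeps the double read of MII_BMSR for a reason: the link-status bit is latched-low per 802.3, so the first read reports a failure that occurred at any point since the last read, and only a second read reflects the live state. The idiom in isolation:

#include <linux/mii.h>
#include <linux/phy.h>

static int example_current_link(struct phy_device *phydev)
{
	int bmsr = phy_read(phydev, MII_BMSR);	/* clears latched-low state */

	if (bmsr < 0)
		return bmsr;

	bmsr = phy_read(phydev, MII_BMSR);	/* live link status */
	if (bmsr < 0)
		return bmsr;

	return !!(bmsr & BMSR_LSTATUS);
}
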
@@ -1743,23 +1739,22 @@ EXPORT_SYMBOL(genphy_update_link);
  */
 int genphy_read_status(struct phy_device *phydev)
 {
-       int adv;
-       int err;
-       int lpa;
-       int lpagb = 0;
+       int adv, lpa, lpagb, err;
 
        /* Update the link, but return if there was an error */
        err = genphy_update_link(phydev);
        if (err)
                return err;
 
+       phydev->speed = SPEED_UNKNOWN;
+       phydev->duplex = DUPLEX_UNKNOWN;
+       phydev->pause = 0;
+       phydev->asym_pause = 0;
+
        linkmode_zero(phydev->lp_advertising);
 
-       if (AUTONEG_ENABLE == phydev->autoneg) {
-               if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
-                                     phydev->supported) ||
-                   linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
-                                     phydev->supported)) {
+       if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete) {
+               if (phydev->is_gigabit_capable) {
                        lpagb = phy_read(phydev, MII_STAT1000);
                        if (lpagb < 0)
                                return lpagb;
@@ -1785,14 +1780,8 @@ int genphy_read_status(struct phy_device *phydev)
                        return lpa;
 
                mii_lpa_mod_linkmode_lpa_t(phydev->lp_advertising, lpa);
-
-               phydev->speed = SPEED_UNKNOWN;
-               phydev->duplex = DUPLEX_UNKNOWN;
-               phydev->pause = 0;
-               phydev->asym_pause = 0;
-
                phy_resolve_aneg_linkmode(phydev);
-       } else {
+       } else if (phydev->autoneg == AUTONEG_DISABLE) {
                int bmcr = phy_read(phydev, MII_BMCR);
 
                if (bmcr < 0)
@@ -1809,9 +1798,6 @@ int genphy_read_status(struct phy_device *phydev)
                        phydev->speed = SPEED_100;
                else
                        phydev->speed = SPEED_10;
-
-               phydev->pause = 0;
-               phydev->asym_pause = 0;
        }
 
        return 0;
@@ -1887,6 +1873,54 @@ int genphy_config_init(struct phy_device *phydev)
 }
 EXPORT_SYMBOL(genphy_config_init);
 
+/**
+ * genphy_read_abilities - read PHY abilities from Clause 22 registers
+ * @phydev: target phy_device struct
+ *
+ * Description: Reads the PHY's abilities and populates
+ * phydev->supported accordingly.
+ *
+ * Returns: 0 on success, < 0 on failure
+ */
+int genphy_read_abilities(struct phy_device *phydev)
+{
+       int val;
+
+       linkmode_set_bit_array(phy_basic_ports_array,
+                              ARRAY_SIZE(phy_basic_ports_array),
+                              phydev->supported);
+
+       val = phy_read(phydev, MII_BMSR);
+       if (val < 0)
+               return val;
+
+       linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported,
+                        val & BMSR_ANEGCAPABLE);
+
+       linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, phydev->supported,
+                        val & BMSR_100FULL);
+       linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, phydev->supported,
+                        val & BMSR_100HALF);
+       linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, phydev->supported,
+                        val & BMSR_10FULL);
+       linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, phydev->supported,
+                        val & BMSR_10HALF);
+
+       if (val & BMSR_ESTATEN) {
+               val = phy_read(phydev, MII_ESTATUS);
+               if (val < 0)
+                       return val;
+
+               linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+                                phydev->supported, val & ESTATUS_1000_TFULL);
+               linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+                                phydev->supported, val & ESTATUS_1000_THALF);
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(genphy_read_abilities);
+
 /* This is used for the phy device which doesn't support the MMD extended
  * register access, but it does have side effect when we are trying to access
  * the MMD register via indirect method.
@@ -2104,12 +2138,24 @@ static int phy_probe(struct device *dev)
         */
        if (phydrv->features) {
                linkmode_copy(phydev->supported, phydrv->features);
-       } else {
+       } else if (phydrv->get_features) {
                err = phydrv->get_features(phydev);
-               if (err)
-                       goto out;
+       } else if (phydev->is_c45) {
+               err = genphy_c45_pma_read_abilities(phydev);
+       } else {
+               err = genphy_read_abilities(phydev);
        }
 
+       if (err)
+               goto out;
+
+       if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+                             phydev->supported))
+               phydev->is_gigabit_capable = 1;
+       if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+                             phydev->supported))
+               phydev->is_gigabit_capable = 1;
+
        of_set_phy_supported(phydev);
        linkmode_copy(phydev->advertising, phydev->supported);
 
@@ -2177,11 +2223,11 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner)
        int retval;
 
        /* Either the features are hard coded, or dynamically
-        * determine. It cannot be both or neither
+        * determined. It cannot be both.
         */
-       if (WARN_ON((!new_driver->features && !new_driver->get_features) ||
-                   (new_driver->features && new_driver->get_features))) {
-               pr_err("%s: Driver features are missing\n", new_driver->name);
+       if (WARN_ON(new_driver->features && new_driver->get_features)) {
+               pr_err("%s: features and get_features must not both be set\n",
+                      new_driver->name);
                return -EINVAL;
        }
 
@@ -2243,8 +2289,7 @@ static struct phy_driver genphy_driver = {
        .phy_id_mask    = 0xffffffff,
        .name           = "Generic PHY",
        .soft_reset     = genphy_no_soft_reset,
-       .config_init    = genphy_config_init,
-       .features       = PHY_GBIT_ALL_PORTS_FEATURES,
+       .get_features   = genphy_read_abilities,
        .aneg_done      = genphy_aneg_done,
        .suspend        = genphy_suspend,
        .resume         = genphy_resume,
@@ -2261,14 +2306,14 @@ static int __init phy_init(void)
 
        features_init();
 
-       rc = phy_driver_register(&genphy_10g_driver, THIS_MODULE);
+       rc = phy_driver_register(&genphy_c45_driver, THIS_MODULE);
        if (rc)
-               goto err_10g;
+               goto err_c45;
 
        rc = phy_driver_register(&genphy_driver, THIS_MODULE);
        if (rc) {
-               phy_driver_unregister(&genphy_10g_driver);
-err_10g:
+               phy_driver_unregister(&genphy_c45_driver);
+err_c45:
                mdio_bus_exit();
        }
 
@@ -2277,7 +2322,7 @@ static int __init phy_init(void)
 
 static void __exit phy_exit(void)
 {
-       phy_driver_unregister(&genphy_10g_driver);
+       phy_driver_unregister(&genphy_c45_driver);
        phy_driver_unregister(&genphy_driver);
        mdio_bus_exit();
 }
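
Taken together, the phy_probe() and phy_driver_register() hunks above establish a single resolution order for phydev->supported: a hard-coded bitmap if the driver has one, else the driver's .get_features callback, else the generic clause 45 or clause 22 ability scan. A consolidated sketch of that ladder (error handling trimmed):

#include <linux/phy.h>

static int example_resolve_features(struct phy_device *phydev,
				    struct phy_driver *drv)
{
	if (drv->features) {			/* 1. hard-coded bitmap */
		linkmode_copy(phydev->supported, drv->features);
		return 0;
	}
	if (drv->get_features)			/* 2. driver callback */
		return drv->get_features(phydev);
	if (phydev->is_c45)			/* 3. generic clause 45 */
		return genphy_c45_pma_read_abilities(phydev);
	return genphy_read_abilities(phydev);	/* 4. generic clause 22 */
}
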
index 5486f6fb2ab2dc7a1428163a74615d650e93ab26..1b15a991ee061dd9cdbb97ed9c27643f0105cdef 100644 (file)
@@ -110,7 +110,7 @@ static struct phy_driver qs6612_driver[] = { {
        .phy_id         = 0x00181440,
        .name           = "QS6612",
        .phy_id_mask    = 0xfffffff0,
-       .features       = PHY_BASIC_FEATURES,
+       /* PHY_BASIC_FEATURES */
        .config_init    = qs6612_config_init,
        .ack_interrupt  = qs6612_ack_interrupt,
        .config_intr    = qs6612_config_intr,
index 10df52ccddfeed9970f8d688f3cebb2300f79722..d6a10f323117ba9e92e68727aaf2da77eb90e9da 100644 (file)
@@ -151,21 +151,14 @@ static int rtl8211_config_aneg(struct phy_device *phydev)
 static int rtl8211c_config_init(struct phy_device *phydev)
 {
        /* RTL8211C has an issue when operating in Gigabit slave mode */
-       phy_set_bits(phydev, MII_CTRL1000,
-                    CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER);
-
-       return genphy_config_init(phydev);
+       return phy_set_bits(phydev, MII_CTRL1000,
+                           CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER);
 }
 
 static int rtl8211f_config_init(struct phy_device *phydev)
 {
-       int ret;
        u16 val = 0;
 
-       ret = genphy_config_init(phydev);
-       if (ret < 0)
-               return ret;
-
        /* enable TX-delay for rgmii-id and rgmii-txid, otherwise disable it */
        if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
            phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
@@ -192,10 +185,6 @@ static int rtl8366rb_config_init(struct phy_device *phydev)
 {
        int ret;
 
-       ret = genphy_config_init(phydev);
-       if (ret < 0)
-               return ret;
-
        ret = phy_set_bits(phydev, RTL8366RB_POWER_SAVE,
                           RTL8366RB_POWER_SAVE_ON);
        if (ret) {
@@ -210,11 +199,9 @@ static struct phy_driver realtek_drvs[] = {
        {
                PHY_ID_MATCH_EXACT(0x00008201),
                .name           = "RTL8201CP Ethernet",
-               .features       = PHY_BASIC_FEATURES,
        }, {
                PHY_ID_MATCH_EXACT(0x001cc816),
                .name           = "RTL8201F Fast Ethernet",
-               .features       = PHY_BASIC_FEATURES,
                .ack_interrupt  = &rtl8201_ack_interrupt,
                .config_intr    = &rtl8201_config_intr,
                .suspend        = genphy_suspend,
@@ -224,14 +211,12 @@ static struct phy_driver realtek_drvs[] = {
        }, {
                PHY_ID_MATCH_EXACT(0x001cc910),
                .name           = "RTL8211 Gigabit Ethernet",
-               .features       = PHY_GBIT_FEATURES,
                .config_aneg    = rtl8211_config_aneg,
                .read_mmd       = &genphy_read_mmd_unsupported,
                .write_mmd      = &genphy_write_mmd_unsupported,
        }, {
                PHY_ID_MATCH_EXACT(0x001cc912),
                .name           = "RTL8211B Gigabit Ethernet",
-               .features       = PHY_GBIT_FEATURES,
                .ack_interrupt  = &rtl821x_ack_interrupt,
                .config_intr    = &rtl8211b_config_intr,
                .read_mmd       = &genphy_read_mmd_unsupported,
@@ -241,14 +226,12 @@ static struct phy_driver realtek_drvs[] = {
        }, {
                PHY_ID_MATCH_EXACT(0x001cc913),
                .name           = "RTL8211C Gigabit Ethernet",
-               .features       = PHY_GBIT_FEATURES,
                .config_init    = rtl8211c_config_init,
                .read_mmd       = &genphy_read_mmd_unsupported,
                .write_mmd      = &genphy_write_mmd_unsupported,
        }, {
                PHY_ID_MATCH_EXACT(0x001cc914),
                .name           = "RTL8211DN Gigabit Ethernet",
-               .features       = PHY_GBIT_FEATURES,
                .ack_interrupt  = rtl821x_ack_interrupt,
                .config_intr    = rtl8211e_config_intr,
                .suspend        = genphy_suspend,
@@ -256,7 +239,6 @@ static struct phy_driver realtek_drvs[] = {
        }, {
                PHY_ID_MATCH_EXACT(0x001cc915),
                .name           = "RTL8211E Gigabit Ethernet",
-               .features       = PHY_GBIT_FEATURES,
                .ack_interrupt  = &rtl821x_ack_interrupt,
                .config_intr    = &rtl8211e_config_intr,
                .suspend        = genphy_suspend,
@@ -264,7 +246,6 @@ static struct phy_driver realtek_drvs[] = {
        }, {
                PHY_ID_MATCH_EXACT(0x001cc916),
                .name           = "RTL8211F Gigabit Ethernet",
-               .features       = PHY_GBIT_FEATURES,
                .config_init    = &rtl8211f_config_init,
                .ack_interrupt  = &rtl8211f_ack_interrupt,
                .config_intr    = &rtl8211f_config_intr,
@@ -275,8 +256,6 @@ static struct phy_driver realtek_drvs[] = {
        }, {
                PHY_ID_MATCH_EXACT(0x001cc800),
                .name           = "Generic Realtek PHY",
-               .features       = PHY_GBIT_FEATURES,
-               .config_init    = genphy_config_init,
                .suspend        = genphy_suspend,
                .resume         = genphy_resume,
                .read_page      = rtl821x_read_page,
@@ -284,7 +263,6 @@ static struct phy_driver realtek_drvs[] = {
        }, {
                PHY_ID_MATCH_EXACT(0x001cc961),
                .name           = "RTL8366RB Gigabit Ethernet",
-               .features       = PHY_GBIT_FEATURES,
                .config_init    = &rtl8366rb_config_init,
                /* These interrupts are handled by the irq controller
                 * embedded inside the RTL8366RB, they get unmasked when the
index 9053b1d01906b10ef7cdaa7a167918aca52b5180..52f1f65320fe035c3722df83b3809b258b0fc0e1 100644 (file)
@@ -175,7 +175,7 @@ static struct phy_driver rockchip_phy_driver[] = {
        .phy_id                 = INTERNAL_EPHY_ID,
        .phy_id_mask            = 0xfffffff0,
        .name                   = "Rockchip integrated EPHY",
-       .features               = PHY_BASIC_FEATURES,
+       /* PHY_BASIC_FEATURES */
        .flags                  = 0,
        .link_change_notify     = rockchip_link_change_notify,
        .soft_reset             = genphy_soft_reset,
index c94d3bfbc7727e825984558920eb58891bf17bfc..dc3d92d340c4d7a019c53706c96b0513cee13356 100644 (file)
@@ -214,7 +214,7 @@ static struct phy_driver smsc_phy_driver[] = {
        .phy_id_mask    = 0xfffffff0,
        .name           = "SMSC LAN83C185",
 
-       .features       = PHY_BASIC_FEATURES,
+       /* PHY_BASIC_FEATURES */
 
        .probe          = smsc_phy_probe,
 
@@ -233,7 +233,7 @@ static struct phy_driver smsc_phy_driver[] = {
        .phy_id_mask    = 0xfffffff0,
        .name           = "SMSC LAN8187",
 
-       .features       = PHY_BASIC_FEATURES,
+       /* PHY_BASIC_FEATURES */
 
        .probe          = smsc_phy_probe,
 
@@ -257,7 +257,7 @@ static struct phy_driver smsc_phy_driver[] = {
        .phy_id_mask    = 0xfffffff0,
        .name           = "SMSC LAN8700",
 
-       .features       = PHY_BASIC_FEATURES,
+       /* PHY_BASIC_FEATURES */
 
        .probe          = smsc_phy_probe,
 
@@ -282,7 +282,7 @@ static struct phy_driver smsc_phy_driver[] = {
        .phy_id_mask    = 0xfffffff0,
        .name           = "SMSC LAN911x Internal PHY",
 
-       .features       = PHY_BASIC_FEATURES,
+       /* PHY_BASIC_FEATURES */
 
        .probe          = smsc_phy_probe,
 
@@ -300,7 +300,7 @@ static struct phy_driver smsc_phy_driver[] = {
        .phy_id_mask    = 0xfffffff0,
        .name           = "SMSC LAN8710/LAN8720",
 
-       .features       = PHY_BASIC_FEATURES,
+       /* PHY_BASIC_FEATURES */
        .flags          = PHY_RST_AFTER_CLK_EN,
 
        .probe          = smsc_phy_probe,
@@ -326,7 +326,7 @@ static struct phy_driver smsc_phy_driver[] = {
        .phy_id_mask    = 0xfffffff0,
        .name           = "SMSC LAN8740",
 
-       .features       = PHY_BASIC_FEATURES,
+       /* PHY_BASIC_FEATURES */
 
        .probe          = smsc_phy_probe,
 
index 5b6acf431f9891a6c8bd93280f08fa753550b29b..d735a01380ed5926222565ace8f10685717a4ed8 100644 (file)
@@ -81,7 +81,7 @@ static struct phy_driver ste10xp_pdriver[] = {
        .phy_id = STE101P_PHY_ID,
        .phy_id_mask = 0xfffffff0,
        .name = "STe101p",
-       .features = PHY_BASIC_FEATURES,
+       /* PHY_BASIC_FEATURES */
        .config_init = ste10Xp_config_init,
        .ack_interrupt = ste10Xp_ack_interrupt,
        .config_intr = ste10Xp_config_intr,
@@ -91,7 +91,7 @@ static struct phy_driver ste10xp_pdriver[] = {
        .phy_id = STE100P_PHY_ID,
        .phy_id_mask = 0xffffffff,
        .name = "STe100p",
-       .features = PHY_BASIC_FEATURES,
+       /* PHY_BASIC_FEATURES */
        .config_init = ste10Xp_config_init,
        .ack_interrupt = ste10Xp_ack_interrupt,
        .config_intr = ste10Xp_config_intr,
index 219fc7cdc2b3bafcf43fdc42e983b588b0b1e036..a32b3fd8a3705c1fb7e9c8d894631f560dc1196c 100644 (file)
@@ -87,7 +87,7 @@ static struct phy_driver upd60620_driver[1] = { {
        .phy_id         = UPD60620_PHY_ID,
        .phy_id_mask    = 0xfffffffe,
        .name           = "Renesas uPD60620",
-       .features       = PHY_BASIC_FEATURES,
+       /* PHY_BASIC_FEATURES */
        .flags          = 0,
        .config_init    = upd60620_config_init,
        .read_status    = upd60620_read_status,
index dc0dd87a66948bcbbae9f346aaaf34ef59038b52..48a881918885552e3c7b41afdcc7c9f2dc921489 100644 (file)
@@ -389,7 +389,7 @@ static struct phy_driver vsc82xx_driver[] = {
        .phy_id         = PHY_ID_VSC8234,
        .name           = "Vitesse VSC8234",
        .phy_id_mask    = 0x000ffff0,
-       .features       = PHY_GBIT_FEATURES,
+       /* PHY_GBIT_FEATURES */
        .config_init    = &vsc824x_config_init,
        .config_aneg    = &vsc82x4_config_aneg,
        .ack_interrupt  = &vsc824x_ack_interrupt,
@@ -398,7 +398,7 @@ static struct phy_driver vsc82xx_driver[] = {
        .phy_id         = PHY_ID_VSC8244,
        .name           = "Vitesse VSC8244",
        .phy_id_mask    = 0x000fffc0,
-       .features       = PHY_GBIT_FEATURES,
+       /* PHY_GBIT_FEATURES */
        .config_init    = &vsc824x_config_init,
        .config_aneg    = &vsc82x4_config_aneg,
        .ack_interrupt  = &vsc824x_ack_interrupt,
@@ -416,7 +416,7 @@ static struct phy_driver vsc82xx_driver[] = {
        .phy_id         = PHY_ID_VSC8572,
        .name           = "Vitesse VSC8572",
        .phy_id_mask    = 0x000ffff0,
-       .features       = PHY_GBIT_FEATURES,
+       /* PHY_GBIT_FEATURES */
        .config_init    = &vsc824x_config_init,
        .config_aneg    = &vsc82x4_config_aneg,
        .ack_interrupt  = &vsc824x_ack_interrupt,
@@ -425,7 +425,7 @@ static struct phy_driver vsc82xx_driver[] = {
        .phy_id         = PHY_ID_VSC8601,
        .name           = "Vitesse VSC8601",
        .phy_id_mask    = 0x000ffff0,
-       .features       = PHY_GBIT_FEATURES,
+       /* PHY_GBIT_FEATURES */
        .config_init    = &vsc8601_config_init,
        .ack_interrupt  = &vsc824x_ack_interrupt,
        .config_intr    = &vsc82xx_config_intr,
@@ -433,7 +433,7 @@ static struct phy_driver vsc82xx_driver[] = {
        .phy_id         = PHY_ID_VSC7385,
        .name           = "Vitesse VSC7385",
        .phy_id_mask    = 0x000ffff0,
-       .features       = PHY_GBIT_FEATURES,
+       /* PHY_GBIT_FEATURES */
        .config_init    = vsc738x_config_init,
        .config_aneg    = vsc73xx_config_aneg,
        .read_page      = vsc73xx_read_page,
@@ -442,7 +442,7 @@ static struct phy_driver vsc82xx_driver[] = {
        .phy_id         = PHY_ID_VSC7388,
        .name           = "Vitesse VSC7388",
        .phy_id_mask    = 0x000ffff0,
-       .features       = PHY_GBIT_FEATURES,
+       /* PHY_GBIT_FEATURES */
        .config_init    = vsc738x_config_init,
        .config_aneg    = vsc73xx_config_aneg,
        .read_page      = vsc73xx_read_page,
@@ -451,7 +451,7 @@ static struct phy_driver vsc82xx_driver[] = {
        .phy_id         = PHY_ID_VSC7395,
        .name           = "Vitesse VSC7395",
        .phy_id_mask    = 0x000ffff0,
-       .features       = PHY_GBIT_FEATURES,
+       /* PHY_GBIT_FEATURES */
        .config_init    = vsc739x_config_init,
        .config_aneg    = vsc73xx_config_aneg,
        .read_page      = vsc73xx_read_page,
@@ -460,7 +460,7 @@ static struct phy_driver vsc82xx_driver[] = {
        .phy_id         = PHY_ID_VSC7398,
        .name           = "Vitesse VSC7398",
        .phy_id_mask    = 0x000ffff0,
-       .features       = PHY_GBIT_FEATURES,
+       /* PHY_GBIT_FEATURES */
        .config_init    = vsc739x_config_init,
        .config_aneg    = vsc73xx_config_aneg,
        .read_page      = vsc73xx_read_page,
@@ -469,7 +469,7 @@ static struct phy_driver vsc82xx_driver[] = {
        .phy_id         = PHY_ID_VSC8662,
        .name           = "Vitesse VSC8662",
        .phy_id_mask    = 0x000ffff0,
-       .features       = PHY_GBIT_FEATURES,
+       /* PHY_GBIT_FEATURES */
        .config_init    = &vsc824x_config_init,
        .config_aneg    = &vsc82x4_config_aneg,
        .ack_interrupt  = &vsc824x_ack_interrupt,
@@ -479,7 +479,7 @@ static struct phy_driver vsc82xx_driver[] = {
        .phy_id         = PHY_ID_VSC8221,
        .phy_id_mask    = 0x000ffff0,
        .name           = "Vitesse VSC8221",
-       .features       = PHY_GBIT_FEATURES,
+       /* PHY_GBIT_FEATURES */
        .config_init    = &vsc8221_config_init,
        .ack_interrupt  = &vsc824x_ack_interrupt,
        .config_intr    = &vsc82xx_config_intr,
@@ -488,7 +488,7 @@ static struct phy_driver vsc82xx_driver[] = {
        .phy_id         = PHY_ID_VSC8211,
        .phy_id_mask    = 0x000ffff0,
        .name           = "Vitesse VSC8211",
-       .features       = PHY_GBIT_FEATURES,
+       /* PHY_GBIT_FEATURES */
        .config_init    = &vsc8221_config_init,
        .ack_interrupt  = &vsc824x_ack_interrupt,
        .config_intr    = &vsc82xx_config_intr,
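
The long run of PHY hunks above all make the same change: the static .features assignment is dropped and kept only as a comment. With .features left unset, phylib determines the supported modes dynamically at probe time by reading the standard ability registers (genphy_read_abilities()). A minimal sketch of what a driver entry looks like after this series, using a hypothetical PHY ID:

    #include <linux/phy.h>

    static struct phy_driver example_phy_driver[] = { {
            .phy_id         = 0x00112233,   /* hypothetical ID */
            .phy_id_mask    = 0xfffffff0,
            .name           = "Example PHY",
            /* PHY_BASIC_FEATURES */        /* detected dynamically at probe */
            .suspend        = genphy_suspend,
            .resume         = genphy_resume,
    } };
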
index a622ec33453a5313050378ff75a0dafa53054e9b..699a8870e92817f04b7865e56e804287ea4dcb24 100644 (file)
@@ -1244,6 +1244,23 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
                goto err_option_port_add;
        }
 
+       /* set promiscuity level to new slave */
+       if (dev->flags & IFF_PROMISC) {
+               err = dev_set_promiscuity(port_dev, 1);
+               if (err)
+                       goto err_set_slave_promisc;
+       }
+
+       /* set allmulti level to new slave */
+       if (dev->flags & IFF_ALLMULTI) {
+               err = dev_set_allmulti(port_dev, 1);
+               if (err) {
+                       if (dev->flags & IFF_PROMISC)
+                               dev_set_promiscuity(port_dev, -1);
+                       goto err_set_slave_promisc;
+               }
+       }
+
        netif_addr_lock_bh(dev);
        dev_uc_sync_multiple(port_dev, dev);
        dev_mc_sync_multiple(port_dev, dev);
@@ -1260,6 +1277,9 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
 
        return 0;
 
+err_set_slave_promisc:
+       __team_option_inst_del_port(team, port);
+
 err_option_port_add:
        team_upper_dev_unlink(team, port);
 
@@ -1305,6 +1325,12 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
 
        team_port_disable(team, port);
        list_del_rcu(&port->list);
+
+       if (dev->flags & IFF_PROMISC)
+               dev_set_promiscuity(port_dev, -1);
+       if (dev->flags & IFF_ALLMULTI)
+               dev_set_allmulti(port_dev, -1);
+
        team_upper_dev_unlink(team, port);
        netdev_rx_handler_unregister(port_dev);
        team_port_disable_netpoll(port);
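
The two team hunks above keep a slave's promiscuity and allmulti counters in step with the master for the whole enslavement lifetime. A minimal sketch of the add-side pattern, with a hypothetical helper name; note the unwind of the promisc increment when the allmulti step fails, mirroring the err_set_slave_promisc path above:

    #include <linux/netdevice.h>

    /* Hypothetical helper: push the master's IFF_PROMISC/IFF_ALLMULTI
     * state onto a newly enslaved port.
     */
    static int example_propagate_flags(struct net_device *master,
                                       struct net_device *port)
    {
            int err;

            if (master->flags & IFF_PROMISC) {
                    err = dev_set_promiscuity(port, 1);
                    if (err)
                            return err;
            }

            if (master->flags & IFF_ALLMULTI) {
                    err = dev_set_allmulti(port, 1);
                    if (err) {
                            /* roll back so the counters stay balanced */
                            if (master->flags & IFF_PROMISC)
                                    dev_set_promiscuity(port, -1);
                            return err;
                    }
            }

            return 0;
    }
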
index 74bebbdb4b158791135410d00591e84eb9da7073..18c4e5d17b055b8efadb2ad0a993edfc6da6f377 100644 (file)
@@ -63,6 +63,7 @@ enum qmi_wwan_flags {
 
 enum qmi_wwan_quirks {
        QMI_WWAN_QUIRK_DTR = 1 << 0,    /* needs "set DTR" request */
+       QMI_WWAN_QUIRK_QUECTEL_DYNCFG = 1 << 1, /* check num. endpoints */
 };
 
 struct qmimux_hdr {
@@ -845,6 +846,16 @@ static const struct driver_info    qmi_wwan_info_quirk_dtr = {
        .data           = QMI_WWAN_QUIRK_DTR,
 };
 
+static const struct driver_info        qmi_wwan_info_quirk_quectel_dyncfg = {
+       .description    = "WWAN/QMI device",
+       .flags          = FLAG_WWAN | FLAG_SEND_ZLP,
+       .bind           = qmi_wwan_bind,
+       .unbind         = qmi_wwan_unbind,
+       .manage_power   = qmi_wwan_manage_power,
+       .rx_fixup       = qmi_wwan_rx_fixup,
+       .data           = QMI_WWAN_QUIRK_DTR | QMI_WWAN_QUIRK_QUECTEL_DYNCFG,
+};
+
 #define HUAWEI_VENDOR_ID       0x12D1
 
 /* map QMI/wwan function by a fixed interface number */
@@ -865,6 +876,15 @@ static const struct driver_info    qmi_wwan_info_quirk_dtr = {
 #define QMI_GOBI_DEVICE(vend, prod) \
        QMI_FIXED_INTF(vend, prod, 0)
 
+/* Quectel does not use fixed interface numbers on at least some of their
+ * devices. We need to check the number of endpoints to ensure that we bind to
+ * the correct interface.
+ */
+#define QMI_QUIRK_QUECTEL_DYNCFG(vend, prod) \
+       USB_DEVICE_AND_INTERFACE_INFO(vend, prod, USB_CLASS_VENDOR_SPEC, \
+                                     USB_SUBCLASS_VENDOR_SPEC, 0xff), \
+       .driver_info = (unsigned long)&qmi_wwan_info_quirk_quectel_dyncfg
+
 static const struct usb_device_id products[] = {
        /* 1. CDC ECM like devices match on the control interface */
        {       /* Huawei E392, E398 and possibly others sharing both device id and more... */
@@ -969,20 +989,9 @@ static const struct usb_device_id products[] = {
                USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7),
                .driver_info = (unsigned long)&qmi_wwan_info,
        },
-       {       /* Quectel EP06/EG06/EM06 */
-               USB_DEVICE_AND_INTERFACE_INFO(0x2c7c, 0x0306,
-                                             USB_CLASS_VENDOR_SPEC,
-                                             USB_SUBCLASS_VENDOR_SPEC,
-                                             0xff),
-               .driver_info        = (unsigned long)&qmi_wwan_info_quirk_dtr,
-       },
-       {       /* Quectel EG12/EM12 */
-               USB_DEVICE_AND_INTERFACE_INFO(0x2c7c, 0x0512,
-                                             USB_CLASS_VENDOR_SPEC,
-                                             USB_SUBCLASS_VENDOR_SPEC,
-                                             0xff),
-               .driver_info        = (unsigned long)&qmi_wwan_info_quirk_dtr,
-       },
+       {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0125)},     /* Quectel EC25, EC20 R2.0  Mini PCIe */
+       {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0306)},     /* Quectel EP06/EG06/EM06 */
+       {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0512)},     /* Quectel EG12/EM12 */
 
        /* 3. Combined interface devices matching on interface number */
        {QMI_FIXED_INTF(0x0408, 0xea42, 4)},    /* Yota / Megafon M100-1 */
@@ -1203,6 +1212,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x19d2, 0x2002, 4)},    /* ZTE (Vodafone) K3765-Z */
        {QMI_FIXED_INTF(0x2001, 0x7e19, 4)},    /* D-Link DWM-221 B1 */
        {QMI_FIXED_INTF(0x2001, 0x7e35, 4)},    /* D-Link DWM-222 */
+       {QMI_FIXED_INTF(0x2020, 0x2031, 4)},    /* Olicard 600 */
        {QMI_FIXED_INTF(0x2020, 0x2033, 4)},    /* BroadMobi BM806U */
        {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)},    /* Sierra Wireless MC7700 */
        {QMI_FIXED_INTF(0x114f, 0x68a2, 8)},    /* Sierra Wireless MC7750 */
@@ -1270,7 +1280,6 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)},    /* HP lt4120 Snapdragon X5 LTE */
        {QMI_FIXED_INTF(0x22de, 0x9061, 3)},    /* WeTelecom WPD-600N */
        {QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)}, /* SIMCom 7100E, 7230E, 7600E ++ */
-       {QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0  Mini PCIe */
        {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
        {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */
        {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)},    /* Quectel BG96 */
@@ -1350,27 +1359,12 @@ static bool quectel_ec20_detected(struct usb_interface *intf)
        return false;
 }
 
-static bool quectel_diag_detected(struct usb_interface *intf)
-{
-       struct usb_device *dev = interface_to_usbdev(intf);
-       struct usb_interface_descriptor intf_desc = intf->cur_altsetting->desc;
-       u16 id_vendor = le16_to_cpu(dev->descriptor.idVendor);
-       u16 id_product = le16_to_cpu(dev->descriptor.idProduct);
-
-       if (id_vendor != 0x2c7c || intf_desc.bNumEndpoints != 2)
-               return false;
-
-       if (id_product == 0x0306 || id_product == 0x0512)
-               return true;
-       else
-               return false;
-}
-
 static int qmi_wwan_probe(struct usb_interface *intf,
                          const struct usb_device_id *prod)
 {
        struct usb_device_id *id = (struct usb_device_id *)prod;
        struct usb_interface_descriptor *desc = &intf->cur_altsetting->desc;
+       const struct driver_info *info;
 
        /* Workaround to enable dynamic IDs.  This disables usbnet
         * blacklisting functionality.  Which, if required, can be
@@ -1404,10 +1398,14 @@ static int qmi_wwan_probe(struct usb_interface *intf,
         * we need to match on class/subclass/protocol. These values are
         * identical for the diagnostic- and QMI-interface, but bNumEndpoints is
         * different. Ignore the current interface if the number of endpoints
-        * the number for the diag interface (two).
+        * equals the number for the diag interface (two).
         */
-       if (quectel_diag_detected(intf))
-               return -ENODEV;
+       info = (void *)&id->driver_info;
+
+       if (info->data & QMI_WWAN_QUIRK_QUECTEL_DYNCFG) {
+               if (desc->bNumEndpoints == 2)
+                       return -ENODEV;
+       }
 
        return usbnet_probe(intf, id);
 }
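
With the QUECTEL_DYNCFG quirk, the match entries above select on class ff, subclass ff, protocol ff, which fits both the QMI function and the modem's diagnostic function; only the endpoint count differs (two for diag, three for QMI). A minimal sketch of that disambiguation, assuming the hypothetical helper name:

    #include <linux/usb.h>

    /* Sketch: on a DYNCFG device, an interface with exactly two
     * endpoints is the diagnostic port, not the QMI port, so probe
     * should decline it with -ENODEV.
     */
    static bool example_is_quectel_diag(struct usb_interface *intf)
    {
            return intf->cur_altsetting->desc.bNumEndpoints == 2;
    }
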
index 86c8c64fbb0f33e920a5fbe77e2d73fe69c14991..6d63dcb73b2619ef6b1a10f8277dc21d8b787398 100644 (file)
@@ -1212,7 +1212,6 @@ static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa)
                goto amacout;
        }
        memcpy(sa->sa_data, buf, 6);
-       ether_addr_copy(tp->netdev->dev_addr, sa->sa_data);
        netif_info(tp, probe, tp->netdev,
                   "Using pass-thru MAC addr %pM\n", sa->sa_data);
 
@@ -1221,43 +1220,55 @@ static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa)
        return ret;
 }
 
-static int set_ethernet_addr(struct r8152 *tp)
+static int determine_ethernet_addr(struct r8152 *tp, struct sockaddr *sa)
 {
        struct net_device *dev = tp->netdev;
-       struct sockaddr sa;
        int ret;
 
        if (tp->version == RTL_VER_01) {
-               ret = pla_ocp_read(tp, PLA_IDR, 8, sa.sa_data);
+               ret = pla_ocp_read(tp, PLA_IDR, 8, sa->sa_data);
        } else {
                /* if device doesn't support MAC pass through this will
                 * be expected to be non-zero
                 */
-               ret = vendor_mac_passthru_addr_read(tp, &sa);
+               ret = vendor_mac_passthru_addr_read(tp, sa);
                if (ret < 0)
-                       ret = pla_ocp_read(tp, PLA_BACKUP, 8, sa.sa_data);
+                       ret = pla_ocp_read(tp, PLA_BACKUP, 8, sa->sa_data);
        }
 
        if (ret < 0) {
                netif_err(tp, probe, dev, "Get ether addr fail\n");
-       } else if (!is_valid_ether_addr(sa.sa_data)) {
+       } else if (!is_valid_ether_addr(sa->sa_data)) {
                netif_err(tp, probe, dev, "Invalid ether addr %pM\n",
-                         sa.sa_data);
+                         sa->sa_data);
                eth_hw_addr_random(dev);
-               ether_addr_copy(sa.sa_data, dev->dev_addr);
-               ret = rtl8152_set_mac_address(dev, &sa);
+               ether_addr_copy(sa->sa_data, dev->dev_addr);
                netif_info(tp, probe, dev, "Random ether addr %pM\n",
-                          sa.sa_data);
-       } else {
-               if (tp->version == RTL_VER_01)
-                       ether_addr_copy(dev->dev_addr, sa.sa_data);
-               else
-                       ret = rtl8152_set_mac_address(dev, &sa);
+                          sa->sa_data);
+               return 0;
        }
 
        return ret;
 }
 
+static int set_ethernet_addr(struct r8152 *tp)
+{
+       struct net_device *dev = tp->netdev;
+       struct sockaddr sa;
+       int ret;
+
+       ret = determine_ethernet_addr(tp, &sa);
+       if (ret < 0)
+               return ret;
+
+       if (tp->version == RTL_VER_01)
+               ether_addr_copy(dev->dev_addr, sa.sa_data);
+       else
+               ret = rtl8152_set_mac_address(dev, &sa);
+
+       return ret;
+}
+
 static void read_bulk_callback(struct urb *urb)
 {
        struct net_device *netdev;
@@ -4264,10 +4275,18 @@ static int rtl8152_post_reset(struct usb_interface *intf)
 {
        struct r8152 *tp = usb_get_intfdata(intf);
        struct net_device *netdev;
+       struct sockaddr sa;
 
        if (!tp)
                return 0;
 
+       /* reset the MAC address in case of policy change */
+       if (determine_ethernet_addr(tp, &sa) >= 0) {
+               rtnl_lock();
+               dev_set_mac_address(tp->netdev, &sa, NULL);
+               rtnl_unlock();
+       }
+
        netdev = tp->netdev;
        if (!netif_running(netdev))
                return 0;
index 569e87a51a3336cb630fe2d752546e867e62f318..09a1433b08332996163a5344d91f5de430890d2d 100644 (file)
@@ -162,18 +162,6 @@ static void veth_get_ethtool_stats(struct net_device *dev,
        }
 }
 
-static int veth_get_ts_info(struct net_device *dev,
-                           struct ethtool_ts_info *info)
-{
-       info->so_timestamping =
-               SOF_TIMESTAMPING_TX_SOFTWARE |
-               SOF_TIMESTAMPING_RX_SOFTWARE |
-               SOF_TIMESTAMPING_SOFTWARE;
-       info->phc_index = -1;
-
-       return 0;
-}
-
 static const struct ethtool_ops veth_ethtool_ops = {
        .get_drvinfo            = veth_get_drvinfo,
        .get_link               = ethtool_op_get_link,
@@ -181,7 +169,7 @@ static const struct ethtool_ops veth_ethtool_ops = {
        .get_sset_count         = veth_get_sset_count,
        .get_ethtool_stats      = veth_get_ethtool_stats,
        .get_link_ksettings     = veth_get_link_ksettings,
-       .get_ts_info            = veth_get_ts_info,
+       .get_ts_info            = ethtool_op_get_ts_info,
 };
 
 /* general routines */
index 1b03c4b6ebff3b2c4c0945957d6ec322eb45a5b2..559c48e66afc4904048d1486b636742eb14e4608 100644 (file)
@@ -31,7 +31,6 @@
 #include <linux/average.h>
 #include <linux/filter.h>
 #include <linux/kernel.h>
-#include <linux/pci.h>
 #include <net/route.h>
 #include <net/xdp.h>
 #include <net/net_failover.h>
@@ -1568,7 +1567,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
        struct send_queue *sq = &vi->sq[qnum];
        int err;
        struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
-       bool kick = !skb->xmit_more;
+       bool kick = !netdev_xmit_more();
        bool use_napi = sq->napi.weight;
 
        /* Free up any pending old buffers before queueing new ones. */
@@ -1588,7 +1587,8 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
                dev->stats.tx_fifo_errors++;
                if (net_ratelimit())
                        dev_warn(&dev->dev,
-                                "Unexpected TXQ (%d) queue failure: %d\n", qnum, err);
+                                "Unexpected TXQ (%d) queue failure: %d\n",
+                                qnum, err);
                dev->stats.tx_dropped++;
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
@@ -2384,7 +2384,7 @@ static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
 
        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
                                  VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
-               dev_warn(&vi->dev->dev, "Fail to set guest offload. \n");
+               dev_warn(&vi->dev->dev, "Fail to set guest offload.\n");
                return -EINVAL;
        }
 
@@ -3115,8 +3115,9 @@ static int virtnet_probe(struct virtio_device *vdev)
                        /* Should never trigger: MTU was previously validated
                         * in virtnet_validate.
                         */
-                       dev_err(&vdev->dev, "device MTU appears to have changed "
-                               "it is now %d < %d", mtu, dev->min_mtu);
+                       dev_err(&vdev->dev,
+                               "device MTU appears to have changed it is now %d < %d",
+                               mtu, dev->min_mtu);
                        goto free;
                }
 
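
The start_xmit hunk above tracks a core API change: the batching hint moved off the skb (skb->xmit_more) to per-cpu state read through netdev_xmit_more(). A minimal sketch of the idiom, with a hypothetical doorbell helper:

    #include <linux/netdevice.h>

    static void example_kick_hardware(struct net_device *dev)
    {
            /* hypothetical: ring the device doorbell / kick the virtqueue */
    }

    static netdev_tx_t example_start_xmit(struct sk_buff *skb,
                                          struct net_device *dev)
    {
            bool kick = !netdev_xmit_more();        /* last skb of the batch? */

            /* ... enqueue skb on the device ring here ... */

            if (kick)
                    example_kick_hardware(dev);
            return NETDEV_TX_OK;
    }
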
index 7c1430ed02445b6e6f13c663b555ef550276c899..ce711e5f8c5d43c9dc93a7337f1a6c3941ba3e69 100644 (file)
@@ -370,7 +370,7 @@ static int vrf_finish_output6(struct net *net, struct sock *sk,
                neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
        if (!IS_ERR(neigh)) {
                sock_confirm_neigh(skb, neigh);
-               ret = neigh_output(neigh, skb);
+               ret = neigh_output(neigh, skb, false);
                rcu_read_unlock_bh();
                return ret;
        }
@@ -549,7 +549,7 @@ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
        struct net_device *dev = dst->dev;
        unsigned int hh_len = LL_RESERVED_SPACE(dev);
        struct neighbour *neigh;
-       u32 nexthop;
+       bool is_v6gw = false;
        int ret = -EINVAL;
 
        nf_reset(skb);
@@ -572,13 +572,11 @@ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
 
        rcu_read_lock_bh();
 
-       nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
-       neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
-       if (unlikely(!neigh))
-               neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
+       neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
        if (!IS_ERR(neigh)) {
                sock_confirm_neigh(skb, neigh);
-               ret = neigh_output(neigh, skb);
+               /* if crossing protocols, can not use the cached header */
+               ret = neigh_output(neigh, skb, is_v6gw);
                rcu_read_unlock_bh();
                return ret;
        }
@@ -1273,9 +1271,14 @@ static void vrf_setup(struct net_device *dev)
 
        /* default to no qdisc; user can add if desired */
        dev->priv_flags |= IFF_NO_QUEUE;
+       dev->priv_flags |= IFF_NO_RX_HANDLER;
 
-       dev->min_mtu = 0;
-       dev->max_mtu = 0;
+       /* VRF devices do not care about MTU, but if the MTU is set
+        * too low then the ipv4 and ipv6 protocols are disabled
+        * which breaks networking.
+        */
+       dev->min_mtu = IPV6_MIN_MTU;
+       dev->max_mtu = ETH_MAX_MTU;
 }
 
 static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],
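
The vrf_finish_output hunk replaces an open-coded IPv4 neighbour lookup with ip_neigh_for_gw(), which also handles routes whose gateway is an IPv6 address (RFC 5549); in that case the cached link-layer header belongs to the wrong protocol, and the third argument to neigh_output() makes it resolve the header afresh. A minimal sketch of the pattern:

    #include <linux/err.h>
    #include <net/route.h>
    #include <net/neighbour.h>

    static int example_finish_output(struct rtable *rt, struct sk_buff *skb)
    {
            struct neighbour *neigh;
            bool is_v6gw = false;
            int ret = -EINVAL;

            rcu_read_lock_bh();
            neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
            if (!IS_ERR(neigh))
                    /* skip the cached header when crossing protocols */
                    ret = neigh_output(neigh, skb, is_v6gw);
            rcu_read_unlock_bh();
            return ret;
    }
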
index a20ea270d519be335b9b0086b1d5f9c8ea3d385d..1acc622d218333ac131666536b1077fb1b9ee808 100644 (file)
@@ -2728,7 +2728,7 @@ static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
                        num_msdus++;
                        num_bytes += ret;
                }
-               ieee80211_return_txq(hw, txq);
+               ieee80211_return_txq(hw, txq, false);
                ieee80211_txq_schedule_end(hw, txq->ac);
 
                record->num_msdus = cpu_to_le16(num_msdus);
index b73c23d4ce86d0cd0631a4838b4ce3a150e34f49..41e89db244d20e67f27d3f226bbceba750962fb0 100644 (file)
@@ -4089,7 +4089,7 @@ static int ath10k_mac_schedule_txq(struct ieee80211_hw *hw, u32 ac)
                        if (ret < 0)
                                break;
                }
-               ieee80211_return_txq(hw, txq);
+               ieee80211_return_txq(hw, txq, false);
                ath10k_htt_tx_txq_update(hw, txq);
                if (ret == -EBUSY)
                        break;
@@ -4374,7 +4374,7 @@ static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
                if (ret < 0)
                        break;
        }
-       ieee80211_return_txq(hw, txq);
+       ieee80211_return_txq(hw, txq, false);
        ath10k_htt_tx_txq_update(hw, txq);
 out:
        ieee80211_txq_schedule_end(hw, ac);
index 773d428ff1b03328ca43c1c8db74103d8d846444..b17e1ca40995eab7b0f80c479f0cff7381801e76 100644 (file)
@@ -1938,12 +1938,15 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
                goto out;
 
        while ((queue = ieee80211_next_txq(hw, txq->mac80211_qnum))) {
+               bool force;
+
                tid = (struct ath_atx_tid *)queue->drv_priv;
 
                ret = ath_tx_sched_aggr(sc, txq, tid);
                ath_dbg(common, QUEUE, "ath_tx_sched_aggr returned %d\n", ret);
 
-               ieee80211_return_txq(hw, queue);
+               force = !skb_queue_empty(&tid->retry_q);
+               ieee80211_return_txq(hw, queue, force);
        }
 
 out:
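
All three ath hunks above follow a mac80211 API change: ieee80211_return_txq() gained a force flag. Passing true reschedules the queue even when mac80211 believes it is empty, which ath9k needs when frames are still sitting on its driver-internal retry queue. A minimal sketch of a scheduling loop using the new signature:

    #include <net/mac80211.h>

    static void example_schedule_ac(struct ieee80211_hw *hw, u8 ac)
    {
            struct ieee80211_txq *txq;

            ieee80211_txq_schedule_start(hw, ac);
            while ((txq = ieee80211_next_txq(hw, ac))) {
                    bool force = false;     /* true if the driver still holds frames */

                    /* ... dequeue and transmit frames from txq ... */

                    ieee80211_return_txq(hw, txq, force);
            }
            ieee80211_txq_schedule_end(hw, ac);
    }
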
index 3203aa72541a2cd837297b727c549ebe6af6cf0d..fc915ecfb06ebfefb4e4c24f0d5e3cbd157b06b7 100644 (file)
@@ -82,6 +82,7 @@
 #define IWL_22000_HR_A0_FW_PRE         "iwlwifi-QuQnj-a0-hr-a0-"
 #define IWL_22000_SU_Z0_FW_PRE         "iwlwifi-su-z0-"
 #define IWL_QU_B_JF_B_FW_PRE           "iwlwifi-Qu-b0-jf-b0-"
+#define IWL_QUZ_A_HR_B_FW_PRE          "iwlwifi-QuZ-a0-hr-b0-"
 #define IWL_QNJ_B_JF_B_FW_PRE          "iwlwifi-QuQnj-b0-jf-b0-"
 #define IWL_CC_A_FW_PRE                        "iwlwifi-cc-a0-"
 #define IWL_22000_SO_A_JF_B_FW_PRE     "iwlwifi-so-a0-jf-b0-"
        IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode"
 #define IWL_22000_SU_Z0_MODULE_FIRMWARE(api) \
        IWL_22000_SU_Z0_FW_PRE __stringify(api) ".ucode"
-#define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \
-       IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode"
+#define IWL_QUZ_A_HR_B_MODULE_FIRMWARE(api) \
+       IWL_QUZ_A_HR_B_FW_PRE __stringify(api) ".ucode"
 #define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \
        IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode"
 #define IWL_QNJ_B_JF_B_MODULE_FIRMWARE(api)            \
@@ -243,8 +244,20 @@ const struct iwl_cfg iwl_ax101_cfg_qu_hr = {
        .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
 };
 
-const struct iwl_cfg iwl22260_2ax_cfg = {
-       .name = "Intel(R) Wireless-AX 22260",
+const struct iwl_cfg iwl_ax101_cfg_quz_hr = {
+       .name = "Intel(R) Wi-Fi 6 AX101",
+       .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
+       IWL_DEVICE_22500,
+       /*
+        * This device doesn't support receiving BlockAck with a large bitmap
+        * so we need to restrict the size of transmitted aggregation to the
+        * HT size; mac80211 would otherwise pick the HE max (256) by default.
+        */
+       .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
+const struct iwl_cfg iwl_ax200_cfg_cc = {
+       .name = "Intel(R) Wi-Fi 6 AX200 160MHz",
        .fw_name_pre = IWL_CC_A_FW_PRE,
        IWL_DEVICE_22500,
        /*
@@ -257,7 +270,7 @@ const struct iwl_cfg iwl22260_2ax_cfg = {
 };
 
 const struct iwl_cfg killer1650x_2ax_cfg = {
-       .name = "Killer(R) Wireless-AX 1650x Wireless Network Adapter (200NGW)",
+       .name = "Killer(R) Wi-Fi 6 AX1650x 160MHz Wireless Network Adapter (200NGW)",
        .fw_name_pre = IWL_CC_A_FW_PRE,
        IWL_DEVICE_22500,
        /*
@@ -270,7 +283,7 @@ const struct iwl_cfg killer1650x_2ax_cfg = {
 };
 
 const struct iwl_cfg killer1650w_2ax_cfg = {
-       .name = "Killer(R) Wireless-AX 1650w Wireless Network Adapter (200D2W)",
+       .name = "Killer(R) Wi-Fi 6 AX1650w 160MHz Wireless Network Adapter (200D2W)",
        .fw_name_pre = IWL_CC_A_FW_PRE,
        IWL_DEVICE_22500,
        /*
@@ -336,7 +349,7 @@ const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0 = {
 };
 
 const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = {
-       .name = "Killer(R) Wireless-AX 1650i Wireless Network Adapter (22560NGW)",
+       .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)",
        .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
        IWL_DEVICE_22500,
        /*
@@ -348,7 +361,7 @@ const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = {
 };
 
 const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = {
-       .name = "Killer(R) Wireless-AX 1650s Wireless Network Adapter (22560D2W)",
+       .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)",
        .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
        IWL_DEVICE_22500,
        /*
@@ -460,6 +473,7 @@ MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_22000_SU_Z0_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_QUZ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_QNJ_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_CC_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_22000_SO_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
index 966caaf7a8a5c331b10c422f1dde431860870b10..be72529cc7891d2d291eae8668e190efe9944caf 100644 (file)
@@ -1700,6 +1700,7 @@ iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt,
        if (!range) {
                IWL_ERR(fwrt, "Failed to fill region header: id=%d, type=%d\n",
                        le32_to_cpu(reg->region_id), type);
+               memset(*data, 0, le32_to_cpu((*data)->len));
                return;
        }
 
@@ -1709,6 +1710,7 @@ iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt,
                if (range_size < 0) {
                        IWL_ERR(fwrt, "Failed to dump region: id=%d, type=%d\n",
                                le32_to_cpu(reg->region_id), type);
+                       memset(*data, 0, le32_to_cpu((*data)->len));
                        return;
                }
                range = range + range_size;
@@ -1897,12 +1899,12 @@ iwl_fw_error_ini_dump_file(struct iwl_fw_runtime *fwrt)
 
        trigger = fwrt->dump.active_trigs[id].trig;
 
-       size = sizeof(*dump_file);
-       size += iwl_fw_ini_get_trigger_len(fwrt, trigger);
-
+       size = iwl_fw_ini_get_trigger_len(fwrt, trigger);
        if (!size)
                return NULL;
 
+       size += sizeof(*dump_file);
+
        dump_file = vzalloc(size);
        if (!dump_file)
                return NULL;
@@ -2043,14 +2045,10 @@ int iwl_fw_dbg_error_collect(struct iwl_fw_runtime *fwrt,
        iwl_dump_error_desc->len = 0;
 
        ret = iwl_fw_dbg_collect_desc(fwrt, iwl_dump_error_desc, false, 0);
-       if (ret) {
+       if (ret)
                kfree(iwl_dump_error_desc);
-       } else {
-               set_bit(STATUS_FW_WAIT_DUMP, &fwrt->trans->status);
-
-               /* trigger nmi to halt the fw */
-               iwl_force_nmi(fwrt->trans);
-       }
+       else
+               iwl_trans_sync_nmi(fwrt->trans);
 
        return ret;
 }
@@ -2619,22 +2617,6 @@ IWL_EXPORT_SYMBOL(iwl_fw_dbg_apply_point);
 
 void iwl_fwrt_stop_device(struct iwl_fw_runtime *fwrt)
 {
-       /* if the wait event timeout elapses instead of wake up then
-        * the driver did not receive NMI interrupt and can not assume the FW
-        * is halted
-        */
-       int ret = wait_event_timeout(fwrt->trans->fw_halt_waitq,
-                                    !test_bit(STATUS_FW_WAIT_DUMP,
-                                              &fwrt->trans->status),
-                                    msecs_to_jiffies(2000));
-       if (!ret) {
-               /* failed to receive NMI interrupt, assuming the FW is stuck */
-               set_bit(STATUS_FW_ERROR, &fwrt->trans->status);
-
-               clear_bit(STATUS_FW_WAIT_DUMP, &fwrt->trans->status);
-       }
-
-       /* Assuming the op mode mutex is held at this point */
        iwl_fw_dbg_collect_sync(fwrt);
 
        iwl_trans_stop_device(fwrt->trans);
index 7adf4e4e841a92f3ae98534b011175e3cfb00ce7..12310e3d2fc5aa7b544b08c95ad3086c3de799c1 100644 (file)
@@ -76,7 +76,6 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
        fwrt->ops_ctx = ops_ctx;
        INIT_DELAYED_WORK(&fwrt->dump.wk, iwl_fw_error_dump_wk);
        iwl_fwrt_dbgfs_register(fwrt, dbgfs_dir);
-       init_waitqueue_head(&fwrt->trans->fw_halt_waitq);
 }
 IWL_EXPORT_SYMBOL(iwl_fw_runtime_init);
 
index 486b6daea37006faef339f293c3e64dbb8d0e111..0a93383791f3a1a96d218d903f1aff9cb2c8b5b3 100644 (file)
@@ -559,8 +559,9 @@ extern const struct iwl_cfg iwl22000_2ac_cfg_hr;
 extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb;
 extern const struct iwl_cfg iwl22000_2ac_cfg_jf;
 extern const struct iwl_cfg iwl_ax101_cfg_qu_hr;
+extern const struct iwl_cfg iwl_ax101_cfg_quz_hr;
 extern const struct iwl_cfg iwl22000_2ax_cfg_hr;
-extern const struct iwl_cfg iwl22260_2ax_cfg;
+extern const struct iwl_cfg iwl_ax200_cfg_cc;
 extern const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0;
 extern const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0;
 extern const struct iwl_cfg killer1650x_2ax_cfg;
index 1aa8744c06d0d406b490885ef839fbc074b82c7c..2b98ecdcf30109431313232c6953e866dc70c98f 100644 (file)
@@ -327,6 +327,7 @@ enum {
 #define CSR_HW_REV_TYPE_NONE           (0x00001F0)
 #define CSR_HW_REV_TYPE_QNJ            (0x0000360)
 #define CSR_HW_REV_TYPE_QNJ_B0         (0x0000364)
+#define CSR_HW_REV_TYPE_QUZ            (0x0000354)
 #define CSR_HW_REV_TYPE_HR_CDB         (0x0000340)
 #define CSR_HW_REV_TYPE_SO             (0x0000370)
 #define CSR_HW_REV_TYPE_TY             (0x0000420)
index 2235978adf70253fe2effdcba3d763e84867004d..1e4c9ef548ccf6f06a3ac56084437bcfbea4465c 100644 (file)
@@ -337,7 +337,6 @@ enum iwl_d3_status {
  *     are sent
  * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
  * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
- * @STATUS_FW_WAIT_DUMP: if set, wait until cleared before collecting dump
  */
 enum iwl_trans_status {
        STATUS_SYNC_HCMD_ACTIVE,
@@ -350,7 +349,6 @@ enum iwl_trans_status {
        STATUS_TRANS_GOING_IDLE,
        STATUS_TRANS_IDLE,
        STATUS_TRANS_DEAD,
-       STATUS_FW_WAIT_DUMP,
 };
 
 static inline int
@@ -617,6 +615,7 @@ struct iwl_trans_ops {
        struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
                                                 u32 dump_mask);
        void (*debugfs_cleanup)(struct iwl_trans *trans);
+       void (*sync_nmi)(struct iwl_trans *trans);
 };
 
 /**
@@ -831,7 +830,6 @@ struct iwl_trans {
        u32 lmac_error_event_table[2];
        u32 umac_error_event_table;
        unsigned int error_event_table_tlv_status;
-       wait_queue_head_t fw_halt_waitq;
        bool hw_error;
 
        /* pointer to trans specific struct */
@@ -1240,10 +1238,12 @@ static inline void iwl_trans_fw_error(struct iwl_trans *trans)
        /* prevent double restarts due to the same erroneous FW */
        if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status))
                iwl_op_mode_nic_error(trans->op_mode);
+}
 
-       if (test_and_clear_bit(STATUS_FW_WAIT_DUMP, &trans->status))
-               wake_up(&trans->fw_halt_waitq);
-
+static inline void iwl_trans_sync_nmi(struct iwl_trans *trans)
+{
+       if (trans->ops->sync_nmi)
+               trans->ops->sync_nmi(trans);
 }
 
 /*****************************************************
index bb2dc0be3621affb6fa3a09f465d94d4abbd7e47..d4c7f08f08e3e4738bb28d94223d0f1e1158a355 100644 (file)
@@ -2815,9 +2815,6 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
 
        iwl_mvm_mac_ctxt_remove(mvm, vif);
 
-       kfree(mvmvif->ap_wep_key);
-       mvmvif->ap_wep_key = NULL;
-
        mutex_unlock(&mvm->mutex);
 }
 
@@ -3284,24 +3281,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
                ret = iwl_mvm_update_sta(mvm, vif, sta);
        } else if (old_state == IEEE80211_STA_ASSOC &&
                   new_state == IEEE80211_STA_AUTHORIZED) {
-               /* if wep is used, need to set the key for the station now */
-               if (vif->type == NL80211_IFTYPE_AP && mvmvif->ap_wep_key) {
-                       mvm_sta->wep_key =
-                               kmemdup(mvmvif->ap_wep_key,
-                                       sizeof(*mvmvif->ap_wep_key) +
-                                       mvmvif->ap_wep_key->keylen,
-                                       GFP_KERNEL);
-                       if (!mvm_sta->wep_key) {
-                               ret = -ENOMEM;
-                               goto out_unlock;
-                       }
-
-                       ret = iwl_mvm_set_sta_key(mvm, vif, sta,
-                                                 mvm_sta->wep_key,
-                                                 STA_KEY_IDX_INVALID);
-               } else {
-                       ret = 0;
-               }
+               ret = 0;
 
                /* we don't support TDLS during DCM */
                if (iwl_mvm_phy_ctx_count(mvm) > 1)
@@ -3343,17 +3323,6 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
                                                   NL80211_TDLS_DISABLE_LINK);
                }
 
-               /* Remove STA key if this is an AP using WEP */
-               if (vif->type == NL80211_IFTYPE_AP && mvmvif->ap_wep_key) {
-                       int rm_ret = iwl_mvm_remove_sta_key(mvm, vif, sta,
-                                                           mvm_sta->wep_key);
-
-                       if (!ret)
-                               ret = rm_ret;
-                       kfree(mvm_sta->wep_key);
-                       mvm_sta->wep_key = NULL;
-               }
-
                if (unlikely(ret &&
                             test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
                                      &mvm->status)))
@@ -3390,6 +3359,13 @@ static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw,
                                  struct ieee80211_sta *sta, u32 changed)
 {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+       if (changed & (IEEE80211_RC_BW_CHANGED |
+                      IEEE80211_RC_SUPP_RATES_CHANGED |
+                      IEEE80211_RC_NSS_CHANGED))
+               iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
+                                    true);
 
        if (vif->type == NL80211_IFTYPE_STATION &&
            changed & IEEE80211_RC_NSS_CHANGED)
@@ -3540,20 +3516,12 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
                break;
        case WLAN_CIPHER_SUITE_WEP40:
        case WLAN_CIPHER_SUITE_WEP104:
-               if (vif->type == NL80211_IFTYPE_AP) {
-                       struct iwl_mvm_vif *mvmvif =
-                               iwl_mvm_vif_from_mac80211(vif);
-
-                       mvmvif->ap_wep_key = kmemdup(key,
-                                                    sizeof(*key) + key->keylen,
-                                                    GFP_KERNEL);
-                       if (!mvmvif->ap_wep_key)
-                               return -ENOMEM;
-               }
-
-               if (vif->type != NL80211_IFTYPE_STATION)
-                       return 0;
-               break;
+               if (vif->type == NL80211_IFTYPE_STATION)
+                       break;
+               if (iwl_mvm_has_new_tx_api(mvm))
+                       return -EOPNOTSUPP;
+               /* support HW crypto on TX */
+               return 0;
        default:
                /* currently FW supports only one optional cipher scheme */
                if (hw->n_cipher_schemes &&
@@ -3641,12 +3609,17 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
                ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset);
                if (ret) {
                        IWL_WARN(mvm, "set key failed\n");
+                       key->hw_key_idx = STA_KEY_IDX_INVALID;
                        /*
                         * can't add key for RX, but we don't need it
-                        * in the device for TX so still return 0
+                        * in the device for TX so still return 0,
+                        * unless we have new TX API where we cannot
+                        * put key material into the TX_CMD
                         */
-                       key->hw_key_idx = STA_KEY_IDX_INVALID;
-                       ret = 0;
+                       if (iwl_mvm_has_new_tx_api(mvm))
+                               ret = -EOPNOTSUPP;
+                       else
+                               ret = 0;
                }
 
                break;
index cdac510fd22b3196b0f21a82621556f991ce2f54..8dc2a9850bc584c4a009caed8fd2c9725a7853ae 100644 (file)
@@ -501,7 +501,6 @@ struct iwl_mvm_vif {
        netdev_features_t features;
 
        struct iwl_probe_resp_data __rcu *probe_resp_data;
-       struct ieee80211_key_conf *ap_wep_key;
 };
 
 static inline struct iwl_mvm_vif *
index 47eddd6456abe15587c64180c5a30ea50687261c..eb452e9dce057823eec03bad00db2e6d42fedace 100644 (file)
@@ -8,7 +8,7 @@
  * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
  * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -1399,7 +1399,9 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
 
                iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid);
                list_del_init(&mvmtxq->list);
+               local_bh_disable();
                iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
+               local_bh_enable();
        }
 
        mutex_unlock(&mvm->mutex);
@@ -2334,21 +2336,6 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
                                   timeout);
 
-       if (mvmvif->ap_wep_key) {
-               u8 key_offset = iwl_mvm_set_fw_key_idx(mvm);
-
-               __set_bit(key_offset, mvm->fw_key_table);
-
-               if (key_offset == STA_KEY_IDX_INVALID)
-                       return -ENOSPC;
-
-               ret = iwl_mvm_send_sta_key(mvm, mvmvif->mcast_sta.sta_id,
-                                          mvmvif->ap_wep_key, true, 0, NULL, 0,
-                                          key_offset, 0);
-               if (ret)
-                       return ret;
-       }
-
        return 0;
 }
 
@@ -2420,28 +2407,6 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 
        iwl_mvm_disable_txq(mvm, NULL, mvmvif->cab_queue, 0, 0);
 
-       if (mvmvif->ap_wep_key) {
-               int i;
-
-               if (!__test_and_clear_bit(mvmvif->ap_wep_key->hw_key_idx,
-                                         mvm->fw_key_table)) {
-                       IWL_ERR(mvm, "offset %d not used in fw key table.\n",
-                               mvmvif->ap_wep_key->hw_key_idx);
-                       return -ENOENT;
-               }
-
-               /* track which key was deleted last */
-               for (i = 0; i < STA_KEY_MAX_NUM; i++) {
-                       if (mvm->fw_key_deleted[i] < U8_MAX)
-                               mvm->fw_key_deleted[i]++;
-               }
-               mvm->fw_key_deleted[mvmvif->ap_wep_key->hw_key_idx] = 0;
-               ret = __iwl_mvm_remove_sta_key(mvm, mvmvif->mcast_sta.sta_id,
-                                              mvmvif->ap_wep_key, true);
-               if (ret)
-                       return ret;
-       }
-
        ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
        if (ret)
                IWL_WARN(mvm, "Failed sending remove station\n");
index 79700c7310a1a3cf38162d6ed3c582fe7c6ed67c..b4d4071b865db90dc81fd8c2db7d410b66686f30 100644 (file)
@@ -8,7 +8,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
  * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
  * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -394,7 +394,6 @@ struct iwl_mvm_rxq_dup_data {
  *     the BA window. To be used for UAPSD only.
  * @ptk_pn: per-queue PTK PN data structures
  * @dup_data: per queue duplicate packet detection data
- * @wep_key: used in AP mode. Is a duplicate of the WEP key.
  * @deferred_traffic_tid_map: indication bitmap of deferred traffic per-TID
  * @tx_ant: the index of the antenna to use for data tx to this station. Only
  *     used during connection establishment (e.g. for the 4 way handshake
@@ -426,8 +425,6 @@ struct iwl_mvm_sta {
        struct iwl_mvm_key_pn __rcu *ptk_pn[4];
        struct iwl_mvm_rxq_dup_data *dup_data;
 
-       struct ieee80211_key_conf *wep_key;
-
        u8 reserved_queue;
 
        /* Temporary, until the new TLC will control the Tx protection */
index 0329b626ada64dff4a73f73522b206978f2fd341..70d0fa0eae2fc99343f2cf685e55d079a2fe5b02 100644 (file)
@@ -953,15 +953,16 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0xA0F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
        {IWL_PCI_DEVICE(0xA0F0, 0x4070, iwl_ax101_cfg_qu_hr)},
 
-       {IWL_PCI_DEVICE(0x2723, 0x0080, iwl22260_2ax_cfg)},
-       {IWL_PCI_DEVICE(0x2723, 0x0084, iwl22260_2ax_cfg)},
-       {IWL_PCI_DEVICE(0x2723, 0x0088, iwl22260_2ax_cfg)},
-       {IWL_PCI_DEVICE(0x2723, 0x008C, iwl22260_2ax_cfg)},
+       {IWL_PCI_DEVICE(0x2723, 0x0080, iwl_ax200_cfg_cc)},
+       {IWL_PCI_DEVICE(0x2723, 0x0084, iwl_ax200_cfg_cc)},
+       {IWL_PCI_DEVICE(0x2723, 0x0088, iwl_ax200_cfg_cc)},
+       {IWL_PCI_DEVICE(0x2723, 0x008C, iwl_ax200_cfg_cc)},
        {IWL_PCI_DEVICE(0x2723, 0x1653, killer1650w_2ax_cfg)},
        {IWL_PCI_DEVICE(0x2723, 0x1654, killer1650x_2ax_cfg)},
-       {IWL_PCI_DEVICE(0x2723, 0x2080, iwl22260_2ax_cfg)},
-       {IWL_PCI_DEVICE(0x2723, 0x4080, iwl22260_2ax_cfg)},
-       {IWL_PCI_DEVICE(0x2723, 0x4088, iwl22260_2ax_cfg)},
+       {IWL_PCI_DEVICE(0x2723, 0x2080, iwl_ax200_cfg_cc)},
+       {IWL_PCI_DEVICE(0x2723, 0x4080, iwl_ax200_cfg_cc)},
+       {IWL_PCI_DEVICE(0x2723, 0x4088, iwl_ax200_cfg_cc)},
+
        {IWL_PCI_DEVICE(0x2725, 0x0090, iwlax210_2ax_cfg_so_hr_a0)},
        {IWL_PCI_DEVICE(0x7A70, 0x0090, iwlax210_2ax_cfg_so_hr_a0)},
        {IWL_PCI_DEVICE(0x7A70, 0x0310, iwlax210_2ax_cfg_so_hr_a0)},
index 860259e6555347644d499f54922b133ce7ae953f..4bf745c7bd6cba12e6ea52b15c78538230096fbf 100644 (file)
@@ -1027,7 +1027,7 @@ static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
 
 void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
 void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
-void iwl_trans_sync_nmi(struct iwl_trans *trans);
+void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans);
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
index cfaad360c823474fd61a37382864c01411ac9ce4..c5baaae8d38e71875d7026759011c2d2fb53541f 100644 (file)
@@ -3325,7 +3325,8 @@ static void iwl_trans_pcie_resume(struct iwl_trans *trans)
        .unref = iwl_trans_pcie_unref,                                  \
        .dump_data = iwl_trans_pcie_dump_data,                          \
        .d3_suspend = iwl_trans_pcie_d3_suspend,                        \
-       .d3_resume = iwl_trans_pcie_d3_resume
+       .d3_resume = iwl_trans_pcie_d3_resume,                          \
+       .sync_nmi = iwl_trans_pcie_sync_nmi
 
 #ifdef CONFIG_PM_SLEEP
 #define IWL_TRANS_PM_OPS                                               \
@@ -3552,6 +3553,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
                }
        } else if (cfg == &iwl_ax101_cfg_qu_hr) {
                if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
+                   CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
+                   trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) {
+                       trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
+               } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
                    CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
                        trans->cfg = &iwl_ax101_cfg_qu_hr;
                } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
@@ -3570,7 +3575,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
                }
        } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
                   CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
-                  (trans->cfg != &iwl22260_2ax_cfg ||
+                  (trans->cfg != &iwl_ax200_cfg_cc ||
                    trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) {
                u32 hw_status;
 
@@ -3647,7 +3652,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
        return ERR_PTR(ret);
 }
 
-void iwl_trans_sync_nmi(struct iwl_trans *trans)
+void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
 {
        unsigned long timeout = jiffies + IWL_TRANS_NMI_TIMEOUT;
 
index 88530d9f4a54ced4e6c8d081cedaf7b0354cde8b..38d11033898716b3e9c1c5fae581c692d4ae44fe 100644 (file)
@@ -965,7 +965,7 @@ static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans,
                               cmd_str);
                ret = -ETIMEDOUT;
 
-               iwl_trans_sync_nmi(trans);
+               iwl_trans_pcie_sync_nmi(trans);
                goto cancel;
        }
 
index bb0fc1c2b4f2fcb3520ad88afeb50720241a820c..4a9522fb682faa8a93f048e636a6777636348e05 100644 (file)
@@ -1962,7 +1962,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
                               iwl_get_cmd_string(trans, cmd->id));
                ret = -ETIMEDOUT;
 
-               iwl_trans_sync_nmi(trans);
+               iwl_trans_pcie_sync_nmi(trans);
                goto cancel;
        }
 
index 4cc7b222859c9ecd6b6ebad64ee14f5607f1cc44..7437faae7cf26395b706a598635eead4fcea2c6d 100644 (file)
@@ -2644,7 +2644,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
        enum nl80211_band band;
        const struct ieee80211_ops *ops = &mac80211_hwsim_ops;
        struct net *net;
-       int idx;
+       int idx, i;
        int n_limits = 0;
 
        if (WARN_ON(param->channels > 1 && !param->use_chanctx))
@@ -2768,12 +2768,23 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
                goto failed_hw;
        }
 
+       data->if_combination.max_interfaces = 0;
+       for (i = 0; i < n_limits; i++)
+               data->if_combination.max_interfaces +=
+                       data->if_limits[i].max;
+
        data->if_combination.n_limits = n_limits;
-       data->if_combination.max_interfaces = 2048;
        data->if_combination.limits = data->if_limits;
 
-       hw->wiphy->iface_combinations = &data->if_combination;
-       hw->wiphy->n_iface_combinations = 1;
+       /*
+        * If we actually were asked to support combinations,
+        * advertise them - if there's only a single thing like
+        * only IBSS then don't advertise it as combinations.
+        */
+       if (data->if_combination.max_interfaces > 1) {
+               hw->wiphy->iface_combinations = &data->if_combination;
+               hw->wiphy->n_iface_combinations = 1;
+       }
 
        if (param->ciphers) {
                memcpy(data->ciphers, param->ciphers,
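
Rather than advertising a fixed max_interfaces of 2048, the hwsim hunk above derives it from the per-type limits, and skips advertising a combination at all when only one interface could ever exist. A minimal sketch of the computation:

    #include <net/cfg80211.h>

    static void example_fill_combination(struct ieee80211_iface_combination *comb,
                                         const struct ieee80211_iface_limit *limits,
                                         int n_limits)
    {
            int i;

            comb->limits = limits;
            comb->n_limits = n_limits;
            comb->max_interfaces = 0;
            for (i = 0; i < n_limits; i++)
                    comb->max_interfaces += limits[i].max;  /* sum of the limits */
    }
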
index d54dda67d036c19cffce6bc30765c39dc93ee326..3af45949e868909e3073335cc302411c5e6c9761 100644 (file)
@@ -510,6 +510,8 @@ int mt7603_register_device(struct mt7603_dev *dev)
        bus_ops->rmw = mt7603_rmw;
        dev->mt76.bus = bus_ops;
 
+       spin_lock_init(&dev->ps_lock);
+
        INIT_DELAYED_WORK(&dev->mac_work, mt7603_mac_work);
        tasklet_init(&dev->pre_tbtt_tasklet, mt7603_pre_tbtt_tasklet,
                     (unsigned long)dev);
index 5e31d7da96fc88e5fab246c61ec1d37a328a8700..5abc02b578185a6467571f549987dd147e2b3d3b 100644 (file)
@@ -343,7 +343,7 @@ void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, u8 tid)
                 MT_BA_CONTROL_1_RESET));
 }
 
-void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ssn,
+void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid,
                            int ba_size)
 {
        u32 addr = mt7603_wtbl2_addr(wcid);
@@ -358,43 +358,6 @@ void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ssn,
                mt76_clear(dev, addr + (15 * 4), tid_mask);
                return;
        }
-       mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
-
-       mt7603_mac_stop(dev);
-       switch (tid) {
-       case 0:
-               mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID0_SN, ssn);
-               break;
-       case 1:
-               mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID1_SN, ssn);
-               break;
-       case 2:
-               mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID2_SN_LO,
-                              ssn);
-               mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID2_SN_HI,
-                              ssn >> 8);
-               break;
-       case 3:
-               mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID3_SN, ssn);
-               break;
-       case 4:
-               mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID4_SN, ssn);
-               break;
-       case 5:
-               mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID5_SN_LO,
-                              ssn);
-               mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID5_SN_HI,
-                              ssn >> 4);
-               break;
-       case 6:
-               mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID6_SN, ssn);
-               break;
-       case 7:
-               mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID7_SN, ssn);
-               break;
-       }
-       mt7603_wtbl_update(dev, wcid, MT_WTBL_UPDATE_WTBL2);
-       mt7603_mac_start(dev);
 
        for (i = 7; i > 0; i--) {
                if (ba_size >= MT_AGG_SIZE_LIMIT(i))
@@ -827,6 +790,7 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ieee80211_tx_rate *rate = &info->control.rates[0];
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
        struct ieee80211_vif *vif = info->control.vif;
        struct mt7603_vif *mvif;
        int wlan_idx;
@@ -834,6 +798,7 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
        int tx_count = 8;
        u8 frame_type, frame_subtype;
        u16 fc = le16_to_cpu(hdr->frame_control);
+       u16 seqno = 0;
        u8 vif_idx = 0;
        u32 val;
        u8 bw;
@@ -919,7 +884,17 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
                tx_count = 0x1f;
 
        val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count) |
-             FIELD_PREP(MT_TXD3_SEQ, le16_to_cpu(hdr->seq_ctrl));
+                 MT_TXD3_SN_VALID;
+
+       if (ieee80211_is_data_qos(hdr->frame_control))
+               seqno = le16_to_cpu(hdr->seq_ctrl);
+       else if (ieee80211_is_back_req(hdr->frame_control))
+               seqno = le16_to_cpu(bar->start_seq_num);
+       else
+               val &= ~MT_TXD3_SN_VALID;
+
+       val |= FIELD_PREP(MT_TXD3_SEQ, seqno >> 4);
+
        txwi[3] = cpu_to_le32(val);
 
        if (key) {
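
The txwi hunk above stops copying the raw seq_ctrl field and instead extracts the 12-bit sequence number, taking it from the QoS header for data frames and from start_seq_num for BlockAck requests; both fields keep the SN in bits 15..4, hence the shift. A minimal sketch, assuming the helper name:

    #include <linux/skbuff.h>
    #include <linux/ieee80211.h>

    /* Sketch: return the 12-bit sequence number the hardware should
     * track for this frame, or mark it invalid.
     */
    static u16 example_tx_seqno(struct sk_buff *skb, bool *valid)
    {
            struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
            struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;

            *valid = true;
            if (ieee80211_is_data_qos(hdr->frame_control))
                    return le16_to_cpu(hdr->seq_ctrl) >> 4;
            if (ieee80211_is_back_req(hdr->frame_control))
                    return le16_to_cpu(bar->start_seq_num) >> 4;

            *valid = false;
            return 0;
    }
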
index cc0fe0933b2d8043e622f1b513817b6528bbcaae..a3c4ef198bfeea965fb3f8d71e9d622cc546bb1a 100644 (file)
@@ -372,7 +372,7 @@ mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
        struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
        struct sk_buff_head list;
 
-       mt76_stop_tx_queues(&dev->mt76, sta, false);
+       mt76_stop_tx_queues(&dev->mt76, sta, true);
        mt7603_wtbl_set_ps(dev, msta, ps);
        if (ps)
                return;
@@ -584,13 +584,13 @@ mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
        case IEEE80211_AMPDU_TX_OPERATIONAL:
                mtxq->aggr = true;
                mtxq->send_bar = false;
-               mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, ba_size);
+               mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, ba_size);
                break;
        case IEEE80211_AMPDU_TX_STOP_FLUSH:
        case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
                mtxq->aggr = false;
                ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
-               mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, -1);
+               mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, -1);
                break;
        case IEEE80211_AMPDU_TX_START:
                mtxq->agg_ssn = *ssn << 4;
@@ -598,7 +598,7 @@ mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                break;
        case IEEE80211_AMPDU_TX_STOP_CONT:
                mtxq->aggr = false;
-               mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, -1);
+               mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, -1);
                ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
                break;
        }
index 79f3324294328b0f5b842a98ea23115b96470ca2..6049f3b7c8fec429de86329d35662c4659f711ee 100644 (file)
@@ -200,7 +200,7 @@ void mt7603_beacon_set_timer(struct mt7603_dev *dev, int idx, int intval);
 int mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb);
 void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data);
 void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, u8 tid);
-void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ssn,
+void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid,
                            int ba_size);
 
 void mt7603_pse_client_reset(struct mt7603_dev *dev);
index 9ed231abe91676119d751b06cfa995a7f5dd716c..4fe5a83ca5a41713d894a4210fe5ef0d68e47e17 100644 (file)
@@ -466,7 +466,6 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
                return;
 
        rcu_read_lock();
-       mt76_tx_status_lock(mdev, &list);
 
        if (stat->wcid < ARRAY_SIZE(dev->mt76.wcid))
                wcid = rcu_dereference(dev->mt76.wcid[stat->wcid]);
@@ -479,6 +478,8 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
                                          drv_priv);
        }
 
+       mt76_tx_status_lock(mdev, &list);
+
        if (wcid) {
                if (stat->pktid >= MT_PACKET_ID_FIRST)
                        status.skb = mt76_tx_status_skb_get(mdev, wcid,
@@ -498,7 +499,9 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
                if (*update == 0 && stat_val == stat_cache &&
                    stat->wcid == msta->status.wcid && msta->n_frames < 32) {
                        msta->n_frames++;
-                       goto out;
+                       mt76_tx_status_unlock(mdev, &list);
+                       rcu_read_unlock();
+                       return;
                }
 
                mt76x02_mac_fill_tx_status(dev, status.info, &msta->status,
@@ -514,11 +517,10 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
 
        if (status.skb)
                mt76_tx_status_skb_done(mdev, status.skb, &list);
-       else
-               ieee80211_tx_status_ext(mt76_hw(dev), &status);
-
-out:
        mt76_tx_status_unlock(mdev, &list);
+
+       if (!status.skb)
+               ieee80211_tx_status_ext(mt76_hw(dev), &status);
        rcu_read_unlock();
 }
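
The reordering above enforces two invariants: the RCU-protected wcid lookup happens before the driver's status lock is taken, and ieee80211_tx_status_ext() only runs after that lock is dropped. A toy sketch of the resulting shape, with invented names and pthreads standing in for the kernel primitives:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t status_lock = PTHREAD_MUTEX_INITIALIZER;

/* stand-in for ieee80211_tx_status_ext(); must not see the lock held */
static void report_status(void)
{
}

static void send_tx_status(bool have_skb)
{
        pthread_mutex_lock(&status_lock);
        /* ... match the hardware report to a queued skb ... */
        pthread_mutex_unlock(&status_lock);

        if (!have_skb)
                report_status();        /* deferred until after unlock */
}
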
 
index 4b1744e9fb78a08c59fe0ac71d0d9962ae6761be..50b92ca92bd75c33d783ed9bfdf0f01f7d5ce0ae 100644 (file)
@@ -673,7 +673,6 @@ enum rt2x00_state_flags {
        CONFIG_CHANNEL_HT40,
        CONFIG_POWERSAVING,
        CONFIG_HT_DISABLED,
-       CONFIG_QOS_DISABLED,
        CONFIG_MONITORING,
 
        /*
index 2825560e2424dbc766c5d5489491ff7dc67c5211..e8462f25d2522c4dbe95215b3de0279213cdc2b4 100644 (file)
@@ -642,18 +642,8 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
                        rt2x00dev->intf_associated--;
 
                rt2x00leds_led_assoc(rt2x00dev, !!rt2x00dev->intf_associated);
-
-               clear_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags);
        }
 
-       /*
-        * Check for access point which do not support 802.11e . We have to
-        * generate data frames sequence number in S/W for such AP, because
-        * of H/W bug.
-        */
-       if (changes & BSS_CHANGED_QOS && !bss_conf->qos)
-               set_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags);
-
        /*
         * When the erp information has changed, we should perform
         * additional configuration steps. For all other changes we are done.
index 92ddc19e7bf747a23d0eb24c15b05ff111751754..4834b4eb0206408093a54d47b2a6a5831aa75674 100644 (file)
@@ -201,15 +201,18 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
        if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_SW_SEQNO)) {
                /*
                 * rt2800 has a H/W (or F/W) bug, device incorrectly increase
-                * seqno on retransmited data (non-QOS) frames. To workaround
-                * the problem let's generate seqno in software if QOS is
-                * disabled.
+                * seqno on retransmitted data (non-QOS) and management frames.
+                * To work around the problem, let's generate the seqno in
+                * software, except for beacons, which are transmitted
+                * periodically by H/W and hence must have their seqno
+                * assigned by hardware.
                 */
-               if (test_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags))
-                       __clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
-               else
+               if (ieee80211_is_beacon(hdr->frame_control)) {
+                       __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
                        /* H/W will generate sequence number */
                        return;
+               }
+
+               __clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
        }
 
        /*
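
With CONFIG_QOS_DISABLED gone, the driver now generates sequence numbers in software for every frame except beacons, which the hardware transmits on its own schedule. A one-line sketch of the resulting predicate, using the standard mac80211 helper:

#include <linux/ieee80211.h>

/* Beacons are sent autonomously by the hardware, so they remain the
 * only frames with a H/W-assigned sequence number. */
static bool hw_assigns_seqno(const struct ieee80211_hdr *hdr)
{
        return ieee80211_is_beacon(hdr->frame_control);
}
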
index 936c0b3e0ba28ec1f6586a5bab86d403a86d0dd6..05847eb91a1b4a24c29567384d0a1b7094f56235 100644 (file)
@@ -248,6 +248,22 @@ struct xenvif_hash {
        struct xenvif_hash_cache cache;
 };
 
+struct backend_info {
+       struct xenbus_device *dev;
+       struct xenvif *vif;
+
+       /* This is the state that will be reflected in xenstore when any
+        * active hotplug script completes.
+        */
+       enum xenbus_state state;
+
+       enum xenbus_state frontend_state;
+       struct xenbus_watch hotplug_status_watch;
+       u8 have_hotplug_status_watch:1;
+
+       const char *hotplug_script;
+};
+
 struct xenvif {
        /* Unique identifier for this interface. */
        domid_t          domid;
@@ -283,6 +299,8 @@ struct xenvif {
        struct xenbus_watch credit_watch;
        struct xenbus_watch mcast_ctrl_watch;
 
+       struct backend_info *be;
+
        spinlock_t lock;
 
 #ifdef CONFIG_DEBUG_FS
index 330ddb64930f98f694a40afd72acb8f78515aca9..41c9e8f2e52032d24309f6971e8c947b5b949a93 100644 (file)
 #include <linux/vmalloc.h>
 #include <linux/rtnetlink.h>
 
-struct backend_info {
-       struct xenbus_device *dev;
-       struct xenvif *vif;
-
-       /* This is the state that will be reflected in xenstore when any
-        * active hotplug script completes.
-        */
-       enum xenbus_state state;
-
-       enum xenbus_state frontend_state;
-       struct xenbus_watch hotplug_status_watch;
-       u8 have_hotplug_status_watch:1;
-
-       const char *hotplug_script;
-};
-
 static int connect_data_rings(struct backend_info *be,
                              struct xenvif_queue *queue);
 static void connect(struct backend_info *be);
@@ -472,6 +456,7 @@ static int backend_create_xenvif(struct backend_info *be)
                return err;
        }
        be->vif = vif;
+       vif->be = be;
 
        kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE);
        return 0;
index 80c30321de4148ecf0b3a5fc9d246877c01577e5..8d33970a2950ea824831426cc4ef935dd06e6a6a 100644 (file)
@@ -2037,7 +2037,7 @@ static void netback_changed(struct xenbus_device *dev,
        case XenbusStateClosed:
                if (dev->state == XenbusStateClosed)
                        break;
-               /* Missed the backend's CLOSING state -- fallthrough */
+               /* Fall through - Missed the backend's CLOSING state. */
        case XenbusStateClosing:
                xenbus_frontend_closed(dev);
                break;
index b72a303176c70962e04f8304a816c78f812512c1..9486acc08402db3a17079c0ec2589ce445bb23d2 100644 (file)
@@ -198,14 +198,15 @@ static struct device *__nd_btt_create(struct nd_region *nd_region,
                return NULL;
 
        nd_btt->id = ida_simple_get(&nd_region->btt_ida, 0, 0, GFP_KERNEL);
-       if (nd_btt->id < 0) {
-               kfree(nd_btt);
-               return NULL;
-       }
+       if (nd_btt->id < 0)
+               goto out_nd_btt;
 
        nd_btt->lbasize = lbasize;
-       if (uuid)
+       if (uuid) {
                uuid = kmemdup(uuid, 16, GFP_KERNEL);
+               if (!uuid)
+                       goto out_put_id;
+       }
        nd_btt->uuid = uuid;
        dev = &nd_btt->dev;
        dev_set_name(dev, "btt%d.%d", nd_region->id, nd_btt->id);
@@ -220,6 +221,13 @@ static struct device *__nd_btt_create(struct nd_region *nd_region,
                return NULL;
        }
        return dev;
+
+out_put_id:
+       ida_simple_remove(&nd_region->btt_ida, nd_btt->id);
+
+out_nd_btt:
+       kfree(nd_btt);
+       return NULL;
 }
 
 struct device *nd_btt_create(struct nd_region *nd_region)
index 7849bf1812c47e64f76e16c0ccf8f0ccc6f3bc25..f293556cbbf6d747004b132a23c440296ec760f7 100644 (file)
@@ -2249,9 +2249,12 @@ static struct device *create_namespace_blk(struct nd_region *nd_region,
        if (!nsblk->uuid)
                goto blk_err;
        memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
-       if (name[0])
+       if (name[0]) {
                nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN,
                                GFP_KERNEL);
+               if (!nsblk->alt_name)
+                       goto blk_err;
+       }
        res = nsblk_add_resource(nd_region, ndd, nsblk,
                        __le64_to_cpu(nd_label->dpa));
        if (!res)
index bc2f700feef8abdad873197237f34f765055c22f..0279eb1da3ef5ae40c5ab80ef6940732dca03bf0 100644 (file)
@@ -113,13 +113,13 @@ static void write_pmem(void *pmem_addr, struct page *page,
 
        while (len) {
                mem = kmap_atomic(page);
-               chunk = min_t(unsigned int, len, PAGE_SIZE);
+               chunk = min_t(unsigned int, len, PAGE_SIZE - off);
                memcpy_flushcache(pmem_addr, mem + off, chunk);
                kunmap_atomic(mem);
                len -= chunk;
                off = 0;
                page++;
-               pmem_addr += PAGE_SIZE;
+               pmem_addr += chunk;
        }
 }
 
@@ -132,7 +132,7 @@ static blk_status_t read_pmem(struct page *page, unsigned int off,
 
        while (len) {
                mem = kmap_atomic(page);
-               chunk = min_t(unsigned int, len, PAGE_SIZE);
+               chunk = min_t(unsigned int, len, PAGE_SIZE - off);
                rem = memcpy_mcsafe(mem + off, pmem_addr, chunk);
                kunmap_atomic(mem);
                if (rem)
@@ -140,7 +140,7 @@ static blk_status_t read_pmem(struct page *page, unsigned int off,
                len -= chunk;
                off = 0;
                page++;
-               pmem_addr += PAGE_SIZE;
+               pmem_addr += chunk;
        }
        return BLK_STS_OK;
 }
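
Both hunks above fix the same arithmetic in the two copy directions: when the copy starts at a non-zero page offset, the first chunk may span at most PAGE_SIZE - off bytes, and the media-side pointer must advance by the bytes actually copied rather than a whole page. A user-space sketch of the corrected loop (illustrative only; PAGE_SIZE assumed 4096, off assumed < PAGE_SIZE):

#include <stddef.h>
#include <string.h>

#define PAGE_SIZE 4096UL

static void copy_chunked(char *dst, const char *pages, size_t off, size_t len)
{
        while (len) {
                size_t chunk = len < PAGE_SIZE - off ? len : PAGE_SIZE - off;

                memcpy(dst, pages + off, chunk);
                len -= chunk;
                off = 0;                /* later pages start at offset 0 */
                pages += PAGE_SIZE;     /* next source page */
                dst += chunk;           /* advance by what was copied */
        }
}
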
index f8bb746a549f7b993dcf61f052acde8303d11cae..a570f2263a424e96908c559750454a086a3df3e2 100644 (file)
@@ -22,6 +22,8 @@ static bool key_revalidate = true;
 module_param(key_revalidate, bool, 0444);
 MODULE_PARM_DESC(key_revalidate, "Require key validation at init.");
 
+static const char zero_key[NVDIMM_PASSPHRASE_LEN];
+
 static void *key_data(struct key *key)
 {
        struct encrypted_key_payload *epayload = dereference_key_locked(key);
@@ -75,6 +77,16 @@ static struct key *nvdimm_request_key(struct nvdimm *nvdimm)
        return key;
 }
 
+static const void *nvdimm_get_key_payload(struct nvdimm *nvdimm,
+               struct key **key)
+{
+       *key = nvdimm_request_key(nvdimm);
+       if (!*key)
+               return zero_key;
+
+       return key_data(*key);
+}
+
 static struct key *nvdimm_lookup_user_key(struct nvdimm *nvdimm,
                key_serial_t id, int subclass)
 {
@@ -105,36 +117,57 @@ static struct key *nvdimm_lookup_user_key(struct nvdimm *nvdimm,
        return key;
 }
 
-static struct key *nvdimm_key_revalidate(struct nvdimm *nvdimm)
+static const void *nvdimm_get_user_key_payload(struct nvdimm *nvdimm,
+               key_serial_t id, int subclass, struct key **key)
+{
+       *key = NULL;
+       if (id == 0) {
+               if (subclass == NVDIMM_BASE_KEY)
+                       return zero_key;
+               else
+                       return NULL;
+       }
+
+       *key = nvdimm_lookup_user_key(nvdimm, id, subclass);
+       if (!*key)
+               return NULL;
+
+       return key_data(*key);
+}
+
+
+static int nvdimm_key_revalidate(struct nvdimm *nvdimm)
 {
        struct key *key;
        int rc;
+       const void *data;
 
        if (!nvdimm->sec.ops->change_key)
-               return NULL;
+               return -EOPNOTSUPP;
 
-       key = nvdimm_request_key(nvdimm);
-       if (!key)
-               return NULL;
+       data = nvdimm_get_key_payload(nvdimm, &key);
 
        /*
         * Send the same key to the hardware as new and old key to
         * verify that the key is good.
         */
-       rc = nvdimm->sec.ops->change_key(nvdimm, key_data(key),
-                       key_data(key), NVDIMM_USER);
+       rc = nvdimm->sec.ops->change_key(nvdimm, data, data, NVDIMM_USER);
        if (rc < 0) {
                nvdimm_put_key(key);
-               key = NULL;
+               return rc;
        }
-       return key;
+
+       nvdimm_put_key(key);
+       nvdimm->sec.state = nvdimm_security_state(nvdimm, NVDIMM_USER);
+       return 0;
 }
 
 static int __nvdimm_security_unlock(struct nvdimm *nvdimm)
 {
        struct device *dev = &nvdimm->dev;
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
-       struct key *key = NULL;
+       struct key *key;
+       const void *data;
        int rc;
 
        /* The bus lock should be held at the top level of the call stack */
@@ -160,16 +193,11 @@ static int __nvdimm_security_unlock(struct nvdimm *nvdimm)
                if (!key_revalidate)
                        return 0;
 
-               key = nvdimm_key_revalidate(nvdimm);
-               if (!key)
-                       return nvdimm_security_freeze(nvdimm);
+               return nvdimm_key_revalidate(nvdimm);
        } else
-               key = nvdimm_request_key(nvdimm);
+               data = nvdimm_get_key_payload(nvdimm, &key);
 
-       if (!key)
-               return -ENOKEY;
-
-       rc = nvdimm->sec.ops->unlock(nvdimm, key_data(key));
+       rc = nvdimm->sec.ops->unlock(nvdimm, data);
        dev_dbg(dev, "key: %d unlock: %s\n", key_serial(key),
                        rc == 0 ? "success" : "fail");
 
@@ -195,6 +223,7 @@ int nvdimm_security_disable(struct nvdimm *nvdimm, unsigned int keyid)
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
        struct key *key;
        int rc;
+       const void *data;
 
        /* The bus lock should be held at the top level of the call stack */
        lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
@@ -214,11 +243,12 @@ int nvdimm_security_disable(struct nvdimm *nvdimm, unsigned int keyid)
                return -EBUSY;
        }
 
-       key = nvdimm_lookup_user_key(nvdimm, keyid, NVDIMM_BASE_KEY);
-       if (!key)
+       data = nvdimm_get_user_key_payload(nvdimm, keyid,
+                       NVDIMM_BASE_KEY, &key);
+       if (!data)
                return -ENOKEY;
 
-       rc = nvdimm->sec.ops->disable(nvdimm, key_data(key));
+       rc = nvdimm->sec.ops->disable(nvdimm, data);
        dev_dbg(dev, "key: %d disable: %s\n", key_serial(key),
                        rc == 0 ? "success" : "fail");
 
@@ -235,6 +265,7 @@ int nvdimm_security_update(struct nvdimm *nvdimm, unsigned int keyid,
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
        struct key *key, *newkey;
        int rc;
+       const void *data, *newdata;
 
        /* The bus lock should be held at the top level of the call stack */
        lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
@@ -249,22 +280,19 @@ int nvdimm_security_update(struct nvdimm *nvdimm, unsigned int keyid,
                return -EIO;
        }
 
-       if (keyid == 0)
-               key = NULL;
-       else {
-               key = nvdimm_lookup_user_key(nvdimm, keyid, NVDIMM_BASE_KEY);
-               if (!key)
-                       return -ENOKEY;
-       }
+       data = nvdimm_get_user_key_payload(nvdimm, keyid,
+                       NVDIMM_BASE_KEY, &key);
+       if (!data)
+               return -ENOKEY;
 
-       newkey = nvdimm_lookup_user_key(nvdimm, new_keyid, NVDIMM_NEW_KEY);
-       if (!newkey) {
+       newdata = nvdimm_get_user_key_payload(nvdimm, new_keyid,
+                       NVDIMM_NEW_KEY, &newkey);
+       if (!newdata) {
                nvdimm_put_key(key);
                return -ENOKEY;
        }
 
-       rc = nvdimm->sec.ops->change_key(nvdimm, key ? key_data(key) : NULL,
-                       key_data(newkey), pass_type);
+       rc = nvdimm->sec.ops->change_key(nvdimm, data, newdata, pass_type);
        dev_dbg(dev, "key: %d %d update%s: %s\n",
                        key_serial(key), key_serial(newkey),
                        pass_type == NVDIMM_MASTER ? "(master)" : "(user)",
@@ -286,8 +314,9 @@ int nvdimm_security_erase(struct nvdimm *nvdimm, unsigned int keyid,
 {
        struct device *dev = &nvdimm->dev;
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
-       struct key *key;
+       struct key *key = NULL;
        int rc;
+       const void *data;
 
        /* The bus lock should be held at the top level of the call stack */
        lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
@@ -319,11 +348,12 @@ int nvdimm_security_erase(struct nvdimm *nvdimm, unsigned int keyid,
                return -EOPNOTSUPP;
        }
 
-       key = nvdimm_lookup_user_key(nvdimm, keyid, NVDIMM_BASE_KEY);
-       if (!key)
+       data = nvdimm_get_user_key_payload(nvdimm, keyid,
+                       NVDIMM_BASE_KEY, &key);
+       if (!data)
                return -ENOKEY;
 
-       rc = nvdimm->sec.ops->erase(nvdimm, key_data(key), pass_type);
+       rc = nvdimm->sec.ops->erase(nvdimm, data, pass_type);
        dev_dbg(dev, "key: %d erase%s: %s\n", key_serial(key),
                        pass_type == NVDIMM_MASTER ? "(master)" : "(user)",
                        rc == 0 ? "success" : "fail");
@@ -337,8 +367,9 @@ int nvdimm_security_overwrite(struct nvdimm *nvdimm, unsigned int keyid)
 {
        struct device *dev = &nvdimm->dev;
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
-       struct key *key;
+       struct key *key = NULL;
        int rc;
+       const void *data;
 
        /* The bus lock should be held at the top level of the call stack */
        lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
@@ -368,15 +399,12 @@ int nvdimm_security_overwrite(struct nvdimm *nvdimm, unsigned int keyid)
                return -EBUSY;
        }
 
-       if (keyid == 0)
-               key = NULL;
-       else {
-               key = nvdimm_lookup_user_key(nvdimm, keyid, NVDIMM_BASE_KEY);
-               if (!key)
-                       return -ENOKEY;
-       }
+       data = nvdimm_get_user_key_payload(nvdimm, keyid,
+                       NVDIMM_BASE_KEY, &key);
+       if (!data)
+               return -ENOKEY;
 
-       rc = nvdimm->sec.ops->overwrite(nvdimm, key ? key_data(key) : NULL);
+       rc = nvdimm->sec.ops->overwrite(nvdimm, data);
        dev_dbg(dev, "key: %d overwrite submission: %s\n", key_serial(key),
                        rc == 0 ? "success" : "fail");
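
The refactoring in this file routes all key handling through helpers that always hand back a usable payload: a missing kernel key, or a zero keyid for the base key, degrades to a static all-zeros passphrase rather than NULL. A simplified sketch of that default-payload pattern (types and length reduced for illustration; the placeholder stands in for NVDIMM_PASSPHRASE_LEN):

#define PASSPHRASE_LEN 32       /* placeholder for NVDIMM_PASSPHRASE_LEN */

static const unsigned char zero_key[PASSPHRASE_LEN];

/* Return the located key's payload, or the default all-zeros
 * passphrase, so the security ops never receive a NULL buffer. */
static const void *payload_or_default(const void *key_payload)
{
        return key_payload ? key_payload : zero_key;
}
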
 
index 470601980794edd9ebd803199587c62f0586fb03..2c43e12b70afccfb424e62d5099339e5056173e9 100644 (file)
@@ -288,7 +288,7 @@ bool nvme_cancel_request(struct request *req, void *data, bool reserved)
                                "Cancelling I/O %d", req->tag);
 
        nvme_req(req)->status = NVME_SC_ABORT_REQ;
-       blk_mq_complete_request(req);
+       blk_mq_complete_request_sync(req);
        return true;
 }
 EXPORT_SYMBOL_GPL(nvme_cancel_request);
index f3b9d91ba0dfd30ba7c4c3f554e14ea860c389b7..6d8451356eaca468742ecf335ee20763d6f73876 100644 (file)
@@ -1845,7 +1845,7 @@ nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx)
        memset(queue, 0, sizeof(*queue));
        queue->ctrl = ctrl;
        queue->qnum = idx;
-       atomic_set(&queue->csn, 1);
+       atomic_set(&queue->csn, 0);
        queue->dev = ctrl->dev;
 
        if (idx > 0)
@@ -1887,7 +1887,7 @@ nvme_fc_free_queue(struct nvme_fc_queue *queue)
         */
 
        queue->connection_id = 0;
-       atomic_set(&queue->csn, 1);
+       atomic_set(&queue->csn, 0);
 }
 
 static void
@@ -2183,7 +2183,6 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 {
        struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
        struct nvme_command *sqe = &cmdiu->sqe;
-       u32 csn;
        int ret, opstate;
 
        /*
@@ -2198,8 +2197,6 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 
        /* format the FC-NVME CMD IU and fcp_req */
        cmdiu->connection_id = cpu_to_be64(queue->connection_id);
-       csn = atomic_inc_return(&queue->csn);
-       cmdiu->csn = cpu_to_be32(csn);
        cmdiu->data_len = cpu_to_be32(data_len);
        switch (io_dir) {
        case NVMEFC_FCP_WRITE:
@@ -2257,11 +2254,24 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
        if (!(op->flags & FCOP_FLAGS_AEN))
                blk_mq_start_request(op->rq);
 
+       cmdiu->csn = cpu_to_be32(atomic_inc_return(&queue->csn));
        ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
                                        &ctrl->rport->remoteport,
                                        queue->lldd_handle, &op->fcp_req);
 
        if (ret) {
+                * If the lld fails to send the command, is there an issue
+                * with the csn value?  If the command that fails is the
+                * Connect, no - as the connection won't be live.  If it is a
+                * command post-connect, it's possible a gap in csn may be
+                * created.  Does this matter?  As Linux initiators don't send
+                * fused commands, no.  The gap would exist, but as there's
+                * nothing on the target side that depends on csn-ordered
+                * delivery, it shouldn't hurt.  It would also be difficult
+                * for a target to even detect the gap, as it has no idea when
+                * the cmd with that csn was supposed to arrive.
                opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
                __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
 
index 2839bb70badfbcb8284bc5bbbc1f457bd3b58c63..f0716f6ce41fa2a1ad993e45adba9148d7f0c120 100644 (file)
@@ -404,15 +404,12 @@ static inline bool nvme_state_is_live(enum nvme_ana_state state)
 static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
                struct nvme_ns *ns)
 {
-       enum nvme_ana_state old;
-
        mutex_lock(&ns->head->lock);
-       old = ns->ana_state;
        ns->ana_grpid = le32_to_cpu(desc->grpid);
        ns->ana_state = desc->state;
        clear_bit(NVME_NS_ANA_PENDING, &ns->flags);
 
-       if (nvme_state_is_live(ns->ana_state) && !nvme_state_is_live(old))
+       if (nvme_state_is_live(ns->ana_state))
                nvme_mpath_set_live(ns);
        mutex_unlock(&ns->head->lock);
 }
index e7e08889865e732d503a6ac2af5d38cac4dd9672..68c49dd672104d82ea768a6e9bf4354df731422b 100644 (file)
@@ -627,7 +627,7 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
        return ret;
 }
 
-static inline void nvme_tcp_end_request(struct request *rq, __le16 status)
+static inline void nvme_tcp_end_request(struct request *rq, u16 status)
 {
        union nvme_result res = {};
 
index 76250181fee0555b2e576651ee67349ecb776392..9f72d515fc4b30a3785b396910660074ad076cf4 100644 (file)
@@ -24,6 +24,11 @@ u32 nvmet_get_log_page_len(struct nvme_command *cmd)
        return len;
 }
 
+u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
+{
+       return le64_to_cpu(cmd->get_log_page.lpo);
+}
+
 static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
 {
        nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->data_len));
index 2d73b66e368627cdee268a74d30fb3c5d6a34235..b3e765a95af8ee7447c536ff48095504c8100d67 100644 (file)
@@ -509,7 +509,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
 
        ret = nvmet_p2pmem_ns_enable(ns);
        if (ret)
-               goto out_unlock;
+               goto out_dev_disable;
 
        list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
                nvmet_p2pmem_ns_add_p2p(ctrl, ns);
@@ -550,7 +550,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
 out_dev_put:
        list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
                pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
-
+out_dev_disable:
        nvmet_ns_dev_disable(ns);
        goto out_unlock;
 }
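
The relabelled error path above is the usual goto-unwind discipline: a failing step must jump to the label that undoes everything already done, not skip past it. A generic sketch with invented step names (not the nvmet code):

static int step_a(void) { return 0; }
static int step_b(void) { return -1; }
static void undo_step_a(void) { }

static int enable(void)
{
        int ret = step_a();

        if (ret)
                goto out;

        ret = step_b();
        if (ret)
                goto undo_a;    /* jumping straight to out would leak step_a */

        return 0;

undo_a:
        undo_step_a();
out:
        return ret;
}
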
index c872b47a88f31722b358e219c403f2c2cb765988..33ed95e72d6b19598f76df0c50f6fccfdaec37bd 100644 (file)
@@ -131,54 +131,76 @@ static void nvmet_set_disc_traddr(struct nvmet_req *req, struct nvmet_port *port
                memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
 }
 
+static size_t discovery_log_entries(struct nvmet_req *req)
+{
+       struct nvmet_ctrl *ctrl = req->sq->ctrl;
+       struct nvmet_subsys_link *p;
+       struct nvmet_port *r;
+       size_t entries = 0;
+
+       list_for_each_entry(p, &req->port->subsystems, entry) {
+               if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
+                       continue;
+               entries++;
+       }
+       list_for_each_entry(r, &req->port->referrals, entry)
+               entries++;
+       return entries;
+}
+
 static void nvmet_execute_get_disc_log_page(struct nvmet_req *req)
 {
        const int entry_size = sizeof(struct nvmf_disc_rsp_page_entry);
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvmf_disc_rsp_page_hdr *hdr;
+       u64 offset = nvmet_get_log_page_offset(req->cmd);
        size_t data_len = nvmet_get_log_page_len(req->cmd);
-       size_t alloc_len = max(data_len, sizeof(*hdr));
-       int residual_len = data_len - sizeof(*hdr);
+       size_t alloc_len;
        struct nvmet_subsys_link *p;
        struct nvmet_port *r;
        u32 numrec = 0;
        u16 status = 0;
+       void *buffer;
+
+       /* Spec requires dword aligned offsets */
+       if (offset & 0x3) {
+               status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+               goto out;
+       }
 
        /*
         * Make sure we're passing at least a buffer of response header size.
         * If host provided data len is less than the header size, only the
         * number of bytes requested by host will be sent to host.
         */
-       hdr = kzalloc(alloc_len, GFP_KERNEL);
-       if (!hdr) {
+       down_read(&nvmet_config_sem);
+       alloc_len = sizeof(*hdr) + entry_size * discovery_log_entries(req);
+       buffer = kzalloc(alloc_len, GFP_KERNEL);
+       if (!buffer) {
+               up_read(&nvmet_config_sem);
                status = NVME_SC_INTERNAL;
                goto out;
        }
 
-       down_read(&nvmet_config_sem);
+       hdr = buffer;
        list_for_each_entry(p, &req->port->subsystems, entry) {
+               char traddr[NVMF_TRADDR_SIZE];
+
                if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
                        continue;
-               if (residual_len >= entry_size) {
-                       char traddr[NVMF_TRADDR_SIZE];
-
-                       nvmet_set_disc_traddr(req, req->port, traddr);
-                       nvmet_format_discovery_entry(hdr, req->port,
-                                       p->subsys->subsysnqn, traddr,
-                                       NVME_NQN_NVME, numrec);
-                       residual_len -= entry_size;
-               }
+
+               nvmet_set_disc_traddr(req, req->port, traddr);
+               nvmet_format_discovery_entry(hdr, req->port,
+                               p->subsys->subsysnqn, traddr,
+                               NVME_NQN_NVME, numrec);
                numrec++;
        }
 
        list_for_each_entry(r, &req->port->referrals, entry) {
-               if (residual_len >= entry_size) {
-                       nvmet_format_discovery_entry(hdr, r,
-                                       NVME_DISC_SUBSYS_NAME,
-                                       r->disc_addr.traddr,
-                                       NVME_NQN_DISC, numrec);
-                       residual_len -= entry_size;
-               }
+               nvmet_format_discovery_entry(hdr, r,
+                               NVME_DISC_SUBSYS_NAME,
+                               r->disc_addr.traddr,
+                               NVME_NQN_DISC, numrec);
                numrec++;
        }
 
@@ -190,8 +212,8 @@ static void nvmet_execute_get_disc_log_page(struct nvmet_req *req)
 
        up_read(&nvmet_config_sem);
 
-       status = nvmet_copy_to_sgl(req, 0, hdr, data_len);
-       kfree(hdr);
+       status = nvmet_copy_to_sgl(req, 0, buffer + offset, data_len);
+       kfree(buffer);
 out:
        nvmet_req_complete(req, status);
 }
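
The rewrite above sizes the discovery log exactly (counting entries under nvmet_config_sem), builds it in full, and then returns the data_len-byte window starting at the host-supplied, dword-aligned offset. A simplified sketch of the windowing (illustrative; the clamp at the end is this sketch's own guard, not taken from the hunk):

#include <stdint.h>
#include <string.h>

static int copy_log_window(void *dst, const void *log, size_t log_len,
                           uint64_t offset, size_t data_len)
{
        if (offset & 0x3)               /* spec: dword-aligned offsets only */
                return -1;
        if (offset > log_len)
                return -1;
        if (data_len > log_len - offset)
                data_len = log_len - offset;    /* clamp to what exists */

        memcpy(dst, (const char *)log + offset, data_len);
        return 0;
}
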
index 3e43212d3c1c6bba5a6d553dc2a965188c5ccbf5..bc6ebb51b0bf7c5310940fca19450fd115ea7788 100644 (file)
@@ -75,11 +75,11 @@ int nvmet_file_ns_enable(struct nvmet_ns *ns)
        return ret;
 }
 
-static void nvmet_file_init_bvec(struct bio_vec *bv, struct sg_page_iter *iter)
+static void nvmet_file_init_bvec(struct bio_vec *bv, struct scatterlist *sg)
 {
-       bv->bv_page = sg_page_iter_page(iter);
-       bv->bv_offset = iter->sg->offset;
-       bv->bv_len = PAGE_SIZE - iter->sg->offset;
+       bv->bv_page = sg_page(sg);
+       bv->bv_offset = sg->offset;
+       bv->bv_len = sg->length;
 }
 
 static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
@@ -128,14 +128,14 @@ static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2)
 
 static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
 {
-       ssize_t nr_bvec = DIV_ROUND_UP(req->data_len, PAGE_SIZE);
-       struct sg_page_iter sg_pg_iter;
+       ssize_t nr_bvec = req->sg_cnt;
        unsigned long bv_cnt = 0;
        bool is_sync = false;
        size_t len = 0, total_len = 0;
        ssize_t ret = 0;
        loff_t pos;
-
+       int i;
+       struct scatterlist *sg;
 
        if (req->f.mpool_alloc && nr_bvec > NVMET_MAX_MPOOL_BVEC)
                is_sync = true;
@@ -147,8 +147,8 @@ static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
        }
 
        memset(&req->f.iocb, 0, sizeof(struct kiocb));
-       for_each_sg_page(req->sg, &sg_pg_iter, req->sg_cnt, 0) {
-               nvmet_file_init_bvec(&req->f.bvec[bv_cnt], &sg_pg_iter);
+       for_each_sg(req->sg, sg, req->sg_cnt, i) {
+               nvmet_file_init_bvec(&req->f.bvec[bv_cnt], sg);
                len += req->f.bvec[bv_cnt].bv_len;
                total_len += req->f.bvec[bv_cnt].bv_len;
                bv_cnt++;
@@ -225,7 +225,7 @@ static void nvmet_file_submit_buffered_io(struct nvmet_req *req)
 
 static void nvmet_file_execute_rw(struct nvmet_req *req)
 {
-       ssize_t nr_bvec = DIV_ROUND_UP(req->data_len, PAGE_SIZE);
+       ssize_t nr_bvec = req->sg_cnt;
 
        if (!req->sg_cnt || !nr_bvec) {
                nvmet_req_complete(req, 0);
index 51e49efd7849df640b5e7cb9fa9715ada7d373e4..1653d19b187fd5de826875cdcf5675c8fcb4431c 100644 (file)
@@ -428,6 +428,7 @@ u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
 u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len);
 
 u32 nvmet_get_log_page_len(struct nvme_command *cmd);
+u64 nvmet_get_log_page_offset(struct nvme_command *cmd);
 
 extern struct list_head *nvmet_ports;
 void nvmet_port_disc_changed(struct nvmet_port *port,
index 1be571c20062c53341e141791b7137bae129ec6e..6bad04cbb1d37b8e9a6227e8d05eca6ce8545642 100644 (file)
 #define DBG_IRT(x...)
 #endif
 
+#ifdef CONFIG_64BIT
+#define COMPARE_IRTE_ADDR(irte, hpa)   ((irte)->dest_iosapic_addr == (hpa))
+#else
 #define COMPARE_IRTE_ADDR(irte, hpa)   \
-               ((irte)->dest_iosapic_addr == F_EXTEND(hpa))
+               ((irte)->dest_iosapic_addr == ((hpa) | 0xffffffff00000000ULL))
+#endif
 
 #define IOSAPIC_REG_SELECT              0x00
 #define IOSAPIC_REG_WINDOW              0x10
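
The IRT entry holds a 64-bit firmware address whose upper 32 bits are all-ones on PA-RISC (what F_EXTEND produces), so the 32-bit build must widen hpa the same way before comparing, while the 64-bit build can compare directly. A plain-C sketch of the 32-bit comparison:

#include <stdint.h>

static int irte_addr_matches(uint64_t dest_iosapic_addr, uint32_t hpa)
{
        /* widen a 32-bit I/O address the way F_EXTEND does */
        return dest_iosapic_addr == ((uint64_t)hpa | 0xffffffff00000000ULL);
}
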
index 3f3df4c29f6e1d40112343e91f902735a6b4d535..905282a8ddaaeda2f8df06570bfb3716e2b2479c 100644 (file)
@@ -115,6 +115,10 @@ static void remove_board(struct controller *ctrl, bool safe_removal)
                 * removed from the slot/adapter.
                 */
                msleep(1000);
+
+               /* Ignore link or presence changes caused by power off */
+               atomic_and(~(PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC),
+                          &ctrl->pending_events);
        }
 
        /* turn off Green LED */
index 224d886341158ba55494da1c766a933b0cfaeefc..d994839a3e24b5ec8c1452f3489c47d8fa7aba20 100644 (file)
@@ -273,6 +273,7 @@ enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
 u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
                           enum pcie_link_width *width);
 void __pcie_print_link_status(struct pci_dev *dev, bool verbose);
+void pcie_report_downtraining(struct pci_dev *dev);
 
 /* Single Root I/O Virtualization */
 struct pci_sriov {
index d2eae3b7cc0f74d5c8fdec80fa6ffffd68dd8501..4fa9e3523ee1a22bc763aa5ea0f162dc00ab09dd 100644 (file)
@@ -30,6 +30,8 @@ static void pcie_enable_link_bandwidth_notification(struct pci_dev *dev)
 {
        u16 lnk_ctl;
 
+       pcie_capability_write_word(dev, PCI_EXP_LNKSTA, PCI_EXP_LNKSTA_LBMS);
+
        pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &lnk_ctl);
        lnk_ctl |= PCI_EXP_LNKCTL_LBMIE;
        pcie_capability_write_word(dev, PCI_EXP_LNKCTL, lnk_ctl);
@@ -44,11 +46,10 @@ static void pcie_disable_link_bandwidth_notification(struct pci_dev *dev)
        pcie_capability_write_word(dev, PCI_EXP_LNKCTL, lnk_ctl);
 }
 
-static irqreturn_t pcie_bw_notification_handler(int irq, void *context)
+static irqreturn_t pcie_bw_notification_irq(int irq, void *context)
 {
        struct pcie_device *srv = context;
        struct pci_dev *port = srv->port;
-       struct pci_dev *dev;
        u16 link_status, events;
        int ret;
 
@@ -58,17 +59,26 @@ static irqreturn_t pcie_bw_notification_handler(int irq, void *context)
        if (ret != PCIBIOS_SUCCESSFUL || !events)
                return IRQ_NONE;
 
+       pcie_capability_write_word(port, PCI_EXP_LNKSTA, events);
+       pcie_update_link_speed(port->subordinate, link_status);
+       return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t pcie_bw_notification_handler(int irq, void *context)
+{
+       struct pcie_device *srv = context;
+       struct pci_dev *port = srv->port;
+       struct pci_dev *dev;
+
        /*
         * Print status from downstream devices, not this root port or
         * downstream switch port.
         */
        down_read(&pci_bus_sem);
        list_for_each_entry(dev, &port->subordinate->devices, bus_list)
-               __pcie_print_link_status(dev, false);
+               pcie_report_downtraining(dev);
        up_read(&pci_bus_sem);
 
-       pcie_update_link_speed(port->subordinate, link_status);
-       pcie_capability_write_word(port, PCI_EXP_LNKSTA, events);
        return IRQ_HANDLED;
 }
 
@@ -80,7 +90,8 @@ static int pcie_bandwidth_notification_probe(struct pcie_device *srv)
        if (!pcie_link_bandwidth_notification_supported(srv->port))
                return -ENODEV;
 
-       ret = request_threaded_irq(srv->irq, NULL, pcie_bw_notification_handler,
+       ret = request_threaded_irq(srv->irq, pcie_bw_notification_irq,
+                                  pcie_bw_notification_handler,
                                   IRQF_SHARED, "PCIe BW notif", srv);
        if (ret)
                return ret;
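
The probe now installs both halves of a threaded interrupt: a hard-IRQ handler that acks the event, records the link speed, and returns IRQ_WAKE_THREAD, plus a thread handler that does the slow per-device reporting. A minimal sketch of the split (kernel-style C; all names invented):

#include <linux/interrupt.h>

static irqreturn_t demo_irq(int irq, void *context)
{
        /* fast half: read and ack the event, cache the link status */
        return IRQ_WAKE_THREAD;         /* defer the slow work */
}

static irqreturn_t demo_thread(int irq, void *context)
{
        /* slow half: may sleep, take pci_bus_sem, print per-device info */
        return IRQ_HANDLED;
}

static int demo_probe(int irq, void *dev)
{
        return request_threaded_irq(irq, demo_irq, demo_thread,
                                    IRQF_SHARED, "demo-bw-notif", dev);
}
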
index 2ec0df04e0dca15ce1f56b3f9049280f199e0928..7e12d016386394ab9b401f3e5dcb8da8b917484c 100644 (file)
@@ -2388,7 +2388,7 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
        return dev;
 }
 
-static void pcie_report_downtraining(struct pci_dev *dev)
+void pcie_report_downtraining(struct pci_dev *dev)
 {
        if (!pci_is_pcie(dev))
                return;
index a59ad09ce911d564c074930ea22968fcfab928e7..a077f67fe1dac17508d09e954cf4e5acead355d6 100644 (file)
@@ -3877,6 +3877,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9128,
 /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130,
                         quirk_dma_func1_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9170,
+                        quirk_dma_func1_alias);
 /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c47 + c57 */
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9172,
                         quirk_dma_func1_alias);
index 5163097b43dff1472af1b905936588750d45b9a8..4bbd9ede38c8355a9bf226e80eaabc19bc9eda6e 100644 (file)
@@ -485,8 +485,11 @@ static int sun4i_usb_phy_set_mode(struct phy *_phy,
        struct sun4i_usb_phy_data *data = to_sun4i_usb_phy_data(phy);
        int new_mode;
 
-       if (phy->index != 0)
+       if (phy->index != 0) {
+               if (mode == PHY_MODE_USB_HOST)
+                       return 0;
                return -EINVAL;
+       }
 
        switch (mode) {
        case PHY_MODE_USB_HOST:
index 8f018b3f3cd4c42ef40764d39587e7e2e11285d1..c7039f52ad51802afa773525af8eed45e5438ac1 100644 (file)
@@ -17,6 +17,7 @@
 
 #include <linux/debugfs.h>
 #include <linux/device.h>
+#include <linux/dmi.h>
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/platform_data/x86/clk-pmc-atom.h>
@@ -391,11 +392,27 @@ static int pmc_dbgfs_register(struct pmc_dev *pmc)
 }
 #endif /* CONFIG_DEBUG_FS */
 
+/*
+ * Some systems need one or more of their pmc_plt_clks to be
+ * marked as critical.
+ */
+static const struct dmi_system_id critclk_systems[] = {
+       {
+               .ident = "MPL CEC1x",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "MPL AG"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "CEC10 Family"),
+               },
+       },
+       { /*sentinel*/ }
+};
+
 static int pmc_setup_clks(struct pci_dev *pdev, void __iomem *pmc_regmap,
                          const struct pmc_data *pmc_data)
 {
        struct platform_device *clkdev;
        struct pmc_clk_data *clk_data;
+       const struct dmi_system_id *d = dmi_first_match(critclk_systems);
 
        clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
        if (!clk_data)
@@ -403,6 +420,10 @@ static int pmc_setup_clks(struct pci_dev *pdev, void __iomem *pmc_regmap,
 
        clk_data->base = pmc_regmap; /* offset is added by client */
        clk_data->clks = pmc_data->clks;
+       if (d) {
+               clk_data->critical = true;
+               pr_info("%s critclks quirk enabled\n", d->ident);
+       }
 
        clkdev = platform_device_register_data(&pdev->dev, "clk-pmc-atom",
                                               PLATFORM_DEVID_NONE,
index 91751617b37af33b6241be4d13647775d5444881..c53a2185a0393c689c631d7df7f1b760cb2b72e0 100644 (file)
@@ -130,6 +130,7 @@ static int meson_audio_arb_probe(struct platform_device *pdev)
        arb->rstc.nr_resets = ARRAY_SIZE(axg_audio_arb_reset_bits);
        arb->rstc.ops = &meson_audio_arb_rstc_ops;
        arb->rstc.of_node = dev->of_node;
+       arb->rstc.owner = THIS_MODULE;
 
        /*
         * Enable general :
index a71734c416939354129253af00090975ff0f1b5e..f933c06bff4f804a3e77408d51fe15606b62e135 100644 (file)
@@ -667,9 +667,9 @@ config RTC_DRV_S5M
          will be called rtc-s5m.
 
 config RTC_DRV_SD3078
-    tristate "ZXW Crystal SD3078"
+    tristate "ZXW Shenzhen whwave SD3078"
     help
-      If you say yes here you get support for the ZXW Crystal
+      If you say yes here you get support for the ZXW Shenzhen whwave
       SD3078 RTC chips.
 
       This driver can also be built as a module. If so, the module
index e5444296075ee147e74c93d35d3eb98e6a2b0c48..4d6bf9304ceb35932dfadbc921b2e658e5e3d2ec 100644 (file)
@@ -298,7 +298,7 @@ static int cros_ec_rtc_suspend(struct device *dev)
        struct cros_ec_rtc *cros_ec_rtc = dev_get_drvdata(&pdev->dev);
 
        if (device_may_wakeup(dev))
-               enable_irq_wake(cros_ec_rtc->cros_ec->irq);
+               return enable_irq_wake(cros_ec_rtc->cros_ec->irq);
 
        return 0;
 }
@@ -309,7 +309,7 @@ static int cros_ec_rtc_resume(struct device *dev)
        struct cros_ec_rtc *cros_ec_rtc = dev_get_drvdata(&pdev->dev);
 
        if (device_may_wakeup(dev))
-               disable_irq_wake(cros_ec_rtc->cros_ec->irq);
+               return disable_irq_wake(cros_ec_rtc->cros_ec->irq);
 
        return 0;
 }
index b4e054c64bad9e54d23adb3a47da1008224906b3..69b54e5556c06234c5339431f3149bc923ebcf49 100644 (file)
@@ -480,6 +480,13 @@ static int da9063_rtc_probe(struct platform_device *pdev)
        da9063_data_to_tm(data, &rtc->alarm_time, rtc);
        rtc->rtc_sync = false;
 
+       /*
+        * TODO: some models have alarms on a minute boundary but still support
+        * real hardware interrupts. Add this once the core supports it.
+        */
+       if (config->rtc_data_start != RTC_SEC)
+               rtc->rtc_dev->uie_unsupported = 1;
+
        irq_alarm = platform_get_irq_byname(pdev, "ALARM");
        ret = devm_request_threaded_irq(&pdev->dev, irq_alarm, NULL,
                                        da9063_alarm_event,
index d417b203cbc553eb25ab0cbf0eb493bcd84c9d46..1d3de2a3d1a4d7a0ad5a7d327efaaca0d72f468c 100644 (file)
@@ -374,7 +374,7 @@ static int sh_rtc_set_time(struct device *dev, struct rtc_time *tm)
 static inline int sh_rtc_read_alarm_value(struct sh_rtc *rtc, int reg_off)
 {
        unsigned int byte;
-       int value = 0xff;       /* return 0xff for ignored values */
+       int value = -1;                 /* return -1 for ignored values */
 
        byte = readb(rtc->regbase + reg_off);
        if (byte & AR_ENB) {
index 4159c63a5fd2bbba9b9c2949fde8c56ba9030a89..a835b31aad999dcbc90847455b0c75f612aba563 100644 (file)
@@ -24,6 +24,7 @@
 #include <asm/crw.h>
 #include <asm/isc.h>
 #include <asm/ebcdic.h>
+#include <asm/ap.h>
 
 #include "css.h"
 #include "cio.h"
@@ -586,6 +587,15 @@ static void chsc_process_sei_scm_avail(struct chsc_sei_nt0_area *sei_area)
                              " failed (rc=%d).\n", ret);
 }
 
+static void chsc_process_sei_ap_cfg_chg(struct chsc_sei_nt0_area *sei_area)
+{
+       CIO_CRW_EVENT(3, "chsc: ap config changed\n");
+       if (sei_area->rs != 5)
+               return;
+
+       ap_bus_cfg_chg();
+}
+
 static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
 {
        switch (sei_area->cc) {
@@ -612,6 +622,9 @@ static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
        case 2: /* i/o resource accessibility */
                chsc_process_sei_res_acc(sei_area);
                break;
+       case 3: /* ap config changed */
+               chsc_process_sei_ap_cfg_chg(sei_area);
+               break;
        case 7: /* channel-path-availability information */
                chsc_process_sei_chp_avail(sei_area);
                break;
index a10cec0e86eb495ffd45f3854a09e1a76bf3e598..0b3b9de45c602042384751921379b0d903e5be79 100644 (file)
@@ -72,20 +72,24 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
 {
        struct vfio_ccw_private *private;
        struct irb *irb;
+       bool is_final;
 
        private = container_of(work, struct vfio_ccw_private, io_work);
        irb = &private->irb;
 
+       is_final = !(scsw_actl(&irb->scsw) &
+                    (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
        if (scsw_is_solicited(&irb->scsw)) {
                cp_update_scsw(&private->cp, &irb->scsw);
-               cp_free(&private->cp);
+               if (is_final)
+                       cp_free(&private->cp);
        }
        memcpy(private->io_region->irb_area, irb, sizeof(*irb));
 
        if (private->io_trigger)
                eventfd_signal(private->io_trigger, 1);
 
-       if (private->mdev)
+       if (private->mdev && is_final)
                private->state = VFIO_CCW_STATE_IDLE;
 }
 
index e15816ff126582f933c66add86bb45e7b0606e0f..1546389d71dbca7ebc1f2f103780182742226376 100644 (file)
@@ -810,11 +810,18 @@ static int ap_device_remove(struct device *dev)
        struct ap_device *ap_dev = to_ap_dev(dev);
        struct ap_driver *ap_drv = ap_dev->drv;
 
+       /* prepare ap queue device removal */
        if (is_queue_dev(dev))
-               ap_queue_remove(to_ap_queue(dev));
+               ap_queue_prepare_remove(to_ap_queue(dev));
+
+       /* driver's chance to clean up gracefully */
        if (ap_drv->remove)
                ap_drv->remove(ap_dev);
 
+       /* now do the ap queue device remove */
+       if (is_queue_dev(dev))
+               ap_queue_remove(to_ap_queue(dev));
+
        /* Remove queue/card from list of active queues/cards */
        spin_lock_bh(&ap_list_lock);
        if (is_card_dev(dev))
@@ -860,6 +867,16 @@ void ap_bus_force_rescan(void)
 }
 EXPORT_SYMBOL(ap_bus_force_rescan);
 
+/*
+ * A config change has happened, force an ap bus rescan.
+ */
+void ap_bus_cfg_chg(void)
+{
+       AP_DBF(DBF_INFO, "%s config change, forcing bus rescan\n", __func__);
+
+       ap_bus_force_rescan();
+}
+
 /*
  * hex2bitmap() - parse hex mask string and set bitmap.
  * Valid strings are "0x012345678" with at least one valid hex number.
index d0059eae5d94bd51a5c677c28162ed63c9f0d437..15a98a673c5cc3323980f15e95d3418b1c65e028 100644 (file)
@@ -91,6 +91,7 @@ enum ap_state {
        AP_STATE_WORKING,
        AP_STATE_QUEUE_FULL,
        AP_STATE_SUSPEND_WAIT,
+       AP_STATE_REMOVE,        /* about to be removed from driver */
        AP_STATE_UNBOUND,       /* momentary not bound to a driver */
        AP_STATE_BORKED,        /* broken */
        NR_AP_STATES
@@ -252,6 +253,7 @@ void ap_bus_force_rescan(void);
 
 void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *ap_msg);
 struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type);
+void ap_queue_prepare_remove(struct ap_queue *aq);
 void ap_queue_remove(struct ap_queue *aq);
 void ap_queue_suspend(struct ap_device *ap_dev);
 void ap_queue_resume(struct ap_device *ap_dev);
index ba261210c6da0518fe7f8f4cb8f702b0503464b9..6a340f2c355693170776992c6a1d018e78d6ee96 100644 (file)
@@ -420,6 +420,10 @@ static ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = {
                [AP_EVENT_POLL] = ap_sm_suspend_read,
                [AP_EVENT_TIMEOUT] = ap_sm_nop,
        },
+       [AP_STATE_REMOVE] = {
+               [AP_EVENT_POLL] = ap_sm_nop,
+               [AP_EVENT_TIMEOUT] = ap_sm_nop,
+       },
        [AP_STATE_UNBOUND] = {
                [AP_EVENT_POLL] = ap_sm_nop,
                [AP_EVENT_TIMEOUT] = ap_sm_nop,
@@ -740,18 +744,31 @@ void ap_flush_queue(struct ap_queue *aq)
 }
 EXPORT_SYMBOL(ap_flush_queue);
 
-void ap_queue_remove(struct ap_queue *aq)
+void ap_queue_prepare_remove(struct ap_queue *aq)
 {
-       ap_flush_queue(aq);
+       spin_lock_bh(&aq->lock);
+       /* flush queue */
+       __ap_flush_queue(aq);
+       /* set REMOVE state to prevent new messages from being queued */
+       aq->state = AP_STATE_REMOVE;
        del_timer_sync(&aq->timeout);
+       spin_unlock_bh(&aq->lock);
+}
 
-       /* reset with zero, also clears irq registration */
+void ap_queue_remove(struct ap_queue *aq)
+{
+       /*
+        * All messages have been flushed and the state is
+        * AP_STATE_REMOVE. Now reset with zero, which also
+        * clears the irq registration, and move the state
+        * to AP_STATE_UNBOUND to signal that this queue
+        * is currently not used by any driver.
+        */
        spin_lock_bh(&aq->lock);
        ap_zapq(aq->qid);
        aq->state = AP_STATE_UNBOUND;
        spin_unlock_bh(&aq->lock);
 }
-EXPORT_SYMBOL(ap_queue_remove);
 
 void ap_queue_reinit_state(struct ap_queue *aq)
 {
@@ -760,4 +777,3 @@ void ap_queue_reinit_state(struct ap_queue *aq)
        ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
        spin_unlock_bh(&aq->lock);
 }
-EXPORT_SYMBOL(ap_queue_reinit_state);
index eb93c2d27d0ad142c4d977d74df3e415468336af..689c2af7026a3adcf08e2e6eb019d9352e6de9d4 100644 (file)
@@ -586,6 +586,7 @@ static inline bool zcrypt_check_queue(struct ap_perms *perms, int queue)
 
 static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
                                                     struct zcrypt_queue *zq,
+                                                    struct module **pmod,
                                                     unsigned int weight)
 {
        if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner))
@@ -595,15 +596,15 @@ static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
        atomic_add(weight, &zc->load);
        atomic_add(weight, &zq->load);
        zq->request_count++;
+       *pmod = zq->queue->ap_dev.drv->driver.owner;
        return zq;
 }
 
 static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
                                     struct zcrypt_queue *zq,
+                                    struct module *mod,
                                     unsigned int weight)
 {
-       struct module *mod = zq->queue->ap_dev.drv->driver.owner;
-
        zq->request_count--;
        atomic_sub(weight, &zc->load);
        atomic_sub(weight, &zq->load);
@@ -653,6 +654,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
        unsigned int weight, pref_weight;
        unsigned int func_code;
        int qid = 0, rc = -ENODEV;
+       struct module *mod;
 
        trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);
 
@@ -706,7 +708,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
                        pref_weight = weight;
                }
        }
-       pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+       pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
        spin_unlock(&zcrypt_list_lock);
 
        if (!pref_zq) {
@@ -718,7 +720,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
        rc = pref_zq->ops->rsa_modexpo(pref_zq, mex);
 
        spin_lock(&zcrypt_list_lock);
-       zcrypt_drop_queue(pref_zc, pref_zq, weight);
+       zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
        spin_unlock(&zcrypt_list_lock);
 
 out:
@@ -735,6 +737,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
        unsigned int weight, pref_weight;
        unsigned int func_code;
        int qid = 0, rc = -ENODEV;
+       struct module *mod;
 
        trace_s390_zcrypt_req(crt, TP_ICARSACRT);
 
@@ -788,7 +791,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
                        pref_weight = weight;
                }
        }
-       pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+       pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
        spin_unlock(&zcrypt_list_lock);
 
        if (!pref_zq) {
@@ -800,7 +803,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
        rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt);
 
        spin_lock(&zcrypt_list_lock);
-       zcrypt_drop_queue(pref_zc, pref_zq, weight);
+       zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
        spin_unlock(&zcrypt_list_lock);
 
 out:
@@ -819,6 +822,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
        unsigned int func_code;
        unsigned short *domain;
        int qid = 0, rc = -ENODEV;
+       struct module *mod;
 
        trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB);
 
@@ -865,7 +869,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
                        pref_weight = weight;
                }
        }
-       pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+       pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
        spin_unlock(&zcrypt_list_lock);
 
        if (!pref_zq) {
@@ -881,7 +885,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
        rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg);
 
        spin_lock(&zcrypt_list_lock);
-       zcrypt_drop_queue(pref_zc, pref_zq, weight);
+       zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
        spin_unlock(&zcrypt_list_lock);
 
 out:
@@ -932,6 +936,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
        unsigned int func_code;
        struct ap_message ap_msg;
        int qid = 0, rc = -ENODEV;
+       struct module *mod;
 
        trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);
 
@@ -1000,7 +1005,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
                        pref_weight = weight;
                }
        }
-       pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+       pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
        spin_unlock(&zcrypt_list_lock);
 
        if (!pref_zq) {
@@ -1012,7 +1017,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
        rc = pref_zq->ops->send_ep11_cprb(pref_zq, xcrb, &ap_msg);
 
        spin_lock(&zcrypt_list_lock);
-       zcrypt_drop_queue(pref_zc, pref_zq, weight);
+       zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
        spin_unlock(&zcrypt_list_lock);
 
 out_free:
@@ -1033,6 +1038,7 @@ static long zcrypt_rng(char *buffer)
        struct ap_message ap_msg;
        unsigned int domain;
        int qid = 0, rc = -ENODEV;
+       struct module *mod;
 
        trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB);
 
@@ -1064,7 +1070,7 @@ static long zcrypt_rng(char *buffer)
                        pref_weight = weight;
                }
        }
-       pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+       pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
        spin_unlock(&zcrypt_list_lock);
 
        if (!pref_zq) {
@@ -1076,7 +1082,7 @@ static long zcrypt_rng(char *buffer)
        rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);
 
        spin_lock(&zcrypt_list_lock);
-       zcrypt_drop_queue(pref_zc, pref_zq, weight);
+       zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
        spin_unlock(&zcrypt_list_lock);
 
 out:
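
The zcrypt changes in this file capture the owning module at pick time and pass it through to the drop path, because zq->queue->ap_dev.drv may already be unbound by the time a request completes. A hedged sketch of the pin-then-remember pattern (kernel-style C, simplified):

#include <linux/errno.h>
#include <linux/module.h>

/* Pin the driver module and remember which one was pinned; the saved
 * pointer stays valid for module_put() even if the driver unbinds
 * before the request completes. */
static int pick(struct module *drv_owner, struct module **pmod)
{
        if (!try_module_get(drv_owner))
                return -ENODEV;
        *pmod = drv_owner;
        return 0;
}

static void drop(struct module *mod)
{
        module_put(mod);
}
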
index 4c3a2db0cf2e15dcb9454e217321cd4e2a87d435..fbaf434e2e3449c028a5d3f2aaf7c81c6d1cb56d 100644 (file)
@@ -219,6 +219,9 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
 /* QDIO queue and buffer handling                                            */
 /*****************************************************************************/
 #define QETH_MAX_QUEUES 4
+#define QETH_IQD_MIN_TXQ       2       /* One for ucast, one for mcast. */
+#define QETH_IQD_MCAST_TXQ     0
+#define QETH_IQD_MIN_UCAST_TXQ 1
 #define QETH_IN_BUF_SIZE_DEFAULT 65536
 #define QETH_IN_BUF_COUNT_DEFAULT 64
 #define QETH_IN_BUF_COUNT_HSDEFAULT 128
@@ -464,7 +467,6 @@ struct qeth_card_stats {
        u64 rx_errors;
        u64 rx_dropped;
        u64 rx_multicast;
-       u64 tx_errors;
 };
 
 struct qeth_out_q_stats {
@@ -479,6 +481,7 @@ struct qeth_out_q_stats {
        u64 skbs_linearized_fail;
        u64 tso_bytes;
        u64 packing_mode_switch;
+       u64 stopped;
 
        /* rtnl_link_stats64 */
        u64 tx_packets;
@@ -509,6 +512,11 @@ struct qeth_qdio_out_q {
        atomic_t set_pci_flags_count;
 };
 
+static inline bool qeth_out_queue_is_full(struct qeth_qdio_out_q *queue)
+{
+       return atomic_read(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q;
+}
+
 struct qeth_qdio_info {
        atomic_t state;
        /* input */
@@ -836,6 +844,15 @@ static inline bool qeth_netdev_is_registered(struct net_device *dev)
        return dev->netdev_ops != NULL;
 }
 
+static inline u16 qeth_iqd_translate_txq(struct net_device *dev, u16 txq)
+{
+       if (txq == QETH_IQD_MCAST_TXQ)
+               return dev->num_tx_queues - 1;
+       if (txq == dev->num_tx_queues - 1)
+               return QETH_IQD_MCAST_TXQ;
+       return txq;
+}
+
 static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf,
                                          unsigned int elements)
 {
@@ -931,18 +948,7 @@ static inline int qeth_send_simple_setassparms_v6(struct qeth_card *card,
                                                 data, QETH_PROT_IPV6);
 }
 
-int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
-                           int ipv);
-static inline struct qeth_qdio_out_q *qeth_get_tx_queue(struct qeth_card *card,
-                                                       struct sk_buff *skb,
-                                                       int ipv, int cast_type)
-{
-       if (IS_IQD(card) && cast_type != RTN_UNICAST)
-               return card->qdio.out_qs[card->qdio.no_out_queues - 1];
-       if (!card->qdio.do_prio_queueing)
-               return card->qdio.out_qs[card->qdio.default_out_queue];
-       return card->qdio.out_qs[qeth_get_priority_queue(card, skb, ipv)];
-}
+int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb);
 
 extern struct qeth_discipline qeth_l2_discipline;
 extern struct qeth_discipline qeth_l3_discipline;
@@ -988,7 +994,7 @@ void qeth_clear_ipacmd_list(struct qeth_card *);
 int qeth_qdio_clear_card(struct qeth_card *, int);
 void qeth_clear_working_pool_list(struct qeth_card *);
 void qeth_clear_cmd_buffers(struct qeth_channel *);
-void qeth_clear_qdio_buffers(struct qeth_card *);
+void qeth_drain_output_queues(struct qeth_card *card);
 void qeth_setadp_promisc_mode(struct qeth_card *);
 int qeth_setadpparms_change_macaddr(struct qeth_card *);
 void qeth_tx_timeout(struct net_device *);
@@ -1023,6 +1029,8 @@ netdev_features_t qeth_features_check(struct sk_buff *skb,
                                      struct net_device *dev,
                                      netdev_features_t features);
 void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats);
+u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
+                         u8 cast_type, struct net_device *sb_dev);
 int qeth_open(struct net_device *dev);
 int qeth_stop(struct net_device *dev);
 
index 2b75f76f23fd5d463b2896c5becfc04cd4b2b29a..d057ead200b5f42c1b10b9b69220a7f322bb2148 100644 (file)
@@ -67,7 +67,7 @@ static void qeth_issue_next_read_cb(struct qeth_card *card,
 static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *);
 static void qeth_free_buffer_pool(struct qeth_card *);
 static int qeth_qdio_establish(struct qeth_card *);
-static void qeth_free_qdio_buffers(struct qeth_card *);
+static void qeth_free_qdio_queues(struct qeth_card *card);
 static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
                struct qeth_qdio_out_buffer *buf,
                enum iucv_tx_notify notification);
@@ -1178,7 +1178,7 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
        atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
 }
 
-static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free)
+static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
 {
        int j;
 
@@ -1194,19 +1194,18 @@ static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free)
        }
 }
 
-void qeth_clear_qdio_buffers(struct qeth_card *card)
+void qeth_drain_output_queues(struct qeth_card *card)
 {
        int i;
 
        QETH_CARD_TEXT(card, 2, "clearqdbf");
        /* clear outbound buffers to free skbs */
        for (i = 0; i < card->qdio.no_out_queues; ++i) {
-               if (card->qdio.out_qs[i]) {
-                       qeth_clear_outq_buffers(card->qdio.out_qs[i], 0);
-               }
+               if (card->qdio.out_qs[i])
+                       qeth_drain_output_queue(card->qdio.out_qs[i], false);
        }
 }
-EXPORT_SYMBOL_GPL(qeth_clear_qdio_buffers);
+EXPORT_SYMBOL_GPL(qeth_drain_output_queues);
 
 static void qeth_free_buffer_pool(struct qeth_card *card)
 {
@@ -1276,30 +1275,28 @@ static int qeth_setup_channel(struct qeth_channel *channel, bool alloc_buffers)
        return 0;
 }
 
-static void qeth_set_single_write_queues(struct qeth_card *card)
+static void qeth_osa_set_output_queues(struct qeth_card *card, bool single)
 {
-       if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) &&
-           (card->qdio.no_out_queues == 4))
-               qeth_free_qdio_buffers(card);
+       unsigned int count = single ? 1 : card->dev->num_tx_queues;
 
-       card->qdio.no_out_queues = 1;
-       if (card->qdio.default_out_queue != 0)
-               dev_info(&card->gdev->dev, "Priority Queueing not supported\n");
+       rtnl_lock();
+       netif_set_real_num_tx_queues(card->dev, count);
+       rtnl_unlock();
 
-       card->qdio.default_out_queue = 0;
-}
+       if (card->qdio.no_out_queues == count)
+               return;
 
-static void qeth_set_multiple_write_queues(struct qeth_card *card)
-{
-       if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) &&
-           (card->qdio.no_out_queues == 1)) {
-               qeth_free_qdio_buffers(card);
-               card->qdio.default_out_queue = 2;
-       }
-       card->qdio.no_out_queues = 4;
+       if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
+               qeth_free_qdio_queues(card);
+
+       if (count == 1)
+               dev_info(&card->gdev->dev, "Priority Queueing not supported\n");
+
+       card->qdio.default_out_queue = single ? 0 : QETH_DEFAULT_QUEUE;
+       card->qdio.no_out_queues = count;
 }
 
-static void qeth_update_from_chp_desc(struct qeth_card *card)
+static int qeth_update_from_chp_desc(struct qeth_card *card)
 {
        struct ccw_device *ccwdev;
        struct channel_path_desc_fmt0 *chp_dsc;
@@ -1309,21 +1306,18 @@ static void qeth_update_from_chp_desc(struct qeth_card *card)
        ccwdev = card->data.ccwdev;
        chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
        if (!chp_dsc)
-               goto out;
+               return -ENOMEM;
 
        card->info.func_level = 0x4100 + chp_dsc->desc;
-       if (card->info.type == QETH_CARD_TYPE_IQD)
-               goto out;
 
-       /* CHPP field bit 6 == 1 -> single queue */
-       if ((chp_dsc->chpp & 0x02) == 0x02)
-               qeth_set_single_write_queues(card);
-       else
-               qeth_set_multiple_write_queues(card);
-out:
+       if (IS_OSD(card) || IS_OSX(card))
+               /* CHPP field bit 6 == 1 -> single queue */
+               qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);
+
        kfree(chp_dsc);
        QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues);
        QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level);
+       return 0;
 }
 
 static void qeth_init_qdio_info(struct qeth_card *card)
@@ -1332,7 +1326,6 @@ static void qeth_init_qdio_info(struct qeth_card *card)
        atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
        card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
        card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
-       card->qdio.no_out_queues = QETH_MAX_QUEUES;
 
        /* inbound */
        card->qdio.no_in_queues = 1;
@@ -2177,7 +2170,7 @@ static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
                /* adjust RX buffer size to new max MTU: */
                card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE;
                if (dev->max_mtu && dev->max_mtu != max_mtu)
-                       qeth_free_qdio_buffers(card);
+                       qeth_free_qdio_queues(card);
        } else {
                if (dev->mtu)
                        new_mtu = dev->mtu;
@@ -2350,12 +2343,12 @@ static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
        if (!q)
                return;
 
-       qeth_clear_outq_buffers(q, 1);
+       qeth_drain_output_queue(q, true);
        qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
        kfree(q);
 }
 
-static struct qeth_qdio_out_q *qeth_alloc_qdio_out_buf(void)
+static struct qeth_qdio_out_q *qeth_alloc_output_queue(void)
 {
        struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
 
@@ -2369,7 +2362,7 @@ static struct qeth_qdio_out_q *qeth_alloc_qdio_out_buf(void)
        return q;
 }
 
-static int qeth_alloc_qdio_buffers(struct qeth_card *card)
+static int qeth_alloc_qdio_queues(struct qeth_card *card)
 {
        int i, j;
 
@@ -2390,7 +2383,7 @@ static int qeth_alloc_qdio_buffers(struct qeth_card *card)
 
        /* outbound */
        for (i = 0; i < card->qdio.no_out_queues; ++i) {
-               card->qdio.out_qs[i] = qeth_alloc_qdio_out_buf();
+               card->qdio.out_qs[i] = qeth_alloc_output_queue();
                if (!card->qdio.out_qs[i])
                        goto out_freeoutq;
                QETH_DBF_TEXT_(SETUP, 2, "outq %i", i);
@@ -2431,7 +2424,7 @@ static int qeth_alloc_qdio_buffers(struct qeth_card *card)
        return -ENOMEM;
 }
 
-static void qeth_free_qdio_buffers(struct qeth_card *card)
+static void qeth_free_qdio_queues(struct qeth_card *card)
 {
        int i, j;
 
@@ -2538,7 +2531,7 @@ static int qeth_mpc_initialize(struct qeth_card *card)
                QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
                goto out_qdio;
        }
-       rc = qeth_alloc_qdio_buffers(card);
+       rc = qeth_alloc_qdio_queues(card);
        if (rc) {
                QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
                goto out_qdio;
@@ -2546,7 +2539,7 @@ static int qeth_mpc_initialize(struct qeth_card *card)
        rc = qeth_qdio_establish(card);
        if (rc) {
                QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
-               qeth_free_qdio_buffers(card);
+               qeth_free_qdio_queues(card);
                goto out_qdio;
        }
        rc = qeth_qdio_activate(card);
@@ -3371,11 +3364,9 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
        }
 
        QETH_TXQ_STAT_ADD(queue, bufs, count);
-       netif_trans_update(queue->card->dev);
        qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
        if (atomic_read(&queue->set_pci_flags_count))
                qdio_flags |= QDIO_FLAG_PCI_OUT;
-       atomic_add(count, &queue->used_buffers);
        rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
                     queue->queue_no, index, count);
        if (rc) {
@@ -3415,7 +3406,6 @@ static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
                         * do_send_packet. So, we check if there is a
                         * packing buffer to be flushed here.
                         */
-                       netif_stop_queue(queue->card->dev);
                        index = queue->next_buf_to_fill;
                        q_was_packing = queue->do_pack;
                        /* queue->do_pack may change */
@@ -3460,7 +3450,7 @@ int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
                        goto out;
                }
 
-               qeth_free_qdio_buffers(card);
+               qeth_free_qdio_queues(card);
                card->options.cq = cq;
                rc = 0;
        }
@@ -3486,7 +3476,7 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
        QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);
 
        if (qdio_err) {
-               netif_stop_queue(card->dev);
+               netif_tx_stop_all_queues(card->dev);
                qeth_schedule_recovery(card);
                return;
        }
@@ -3542,12 +3532,14 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
        struct qeth_card *card        = (struct qeth_card *) card_ptr;
        struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
        struct qeth_qdio_out_buffer *buffer;
+       struct net_device *dev = card->dev;
+       struct netdev_queue *txq;
        int i;
 
        QETH_CARD_TEXT(card, 6, "qdouhdl");
        if (qdio_error & QDIO_ERROR_FATAL) {
                QETH_CARD_TEXT(card, 2, "achkcond");
-               netif_stop_queue(card->dev);
+               netif_tx_stop_all_queues(dev);
                qeth_schedule_recovery(card);
                return;
        }
@@ -3596,30 +3588,29 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
        if (card->info.type != QETH_CARD_TYPE_IQD)
                qeth_check_outbound_queue(queue);
 
-       netif_wake_queue(queue->card->dev);
-}
-
-/* We cannot use outbound queue 3 for unicast packets on HiperSockets */
-static inline int qeth_cut_iqd_prio(struct qeth_card *card, int queue_num)
-{
-       if ((card->info.type == QETH_CARD_TYPE_IQD) && (queue_num == 3))
-               return 2;
-       return queue_num;
+       if (IS_IQD(card))
+               __queue = qeth_iqd_translate_txq(dev, __queue);
+       txq = netdev_get_tx_queue(dev, __queue);
+       /* xmit may have observed the full-condition, but not yet stopped the
+        * txq; in that case the code below won't trigger. So before returning,
+        * xmit re-checks the txq's fill level and wakes the txq if needed.
+        */
+       if (netif_tx_queue_stopped(txq) && !qeth_out_queue_is_full(queue))
+               netif_tx_wake_queue(txq);
 }
 
 /**
  * Note: Function assumes that we have 4 outbound queues.
  */
-int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
-                           int ipv)
+int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
 {
-       __be16 *tci;
+       struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
        u8 tos;
 
        switch (card->qdio.do_prio_queueing) {
        case QETH_PRIO_Q_ING_TOS:
        case QETH_PRIO_Q_ING_PREC:
-               switch (ipv) {
+               switch (qeth_get_ip_version(skb)) {
                case 4:
                        tos = ipv4_get_dsfield(ip_hdr(skb));
                        break;
@@ -3630,9 +3621,9 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
                        return card->qdio.default_out_queue;
                }
                if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
-                       return qeth_cut_iqd_prio(card, ~tos >> 6 & 3);
+                       return ~tos >> 6 & 3;
                if (tos & IPTOS_MINCOST)
-                       return qeth_cut_iqd_prio(card, 3);
+                       return 3;
                if (tos & IPTOS_RELIABILITY)
                        return 2;
                if (tos & IPTOS_THROUGHPUT)
@@ -3643,12 +3634,11 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
        case QETH_PRIO_Q_ING_SKB:
                if (skb->priority > 5)
                        return 0;
-               return qeth_cut_iqd_prio(card, ~skb->priority >> 1 & 3);
+               return ~skb->priority >> 1 & 3;
        case QETH_PRIO_Q_ING_VLAN:
-               tci = &((struct ethhdr *)skb->data)->h_proto;
-               if (be16_to_cpu(*tci) == ETH_P_8021Q)
-                       return qeth_cut_iqd_prio(card,
-                       ~be16_to_cpu(*(tci + 1)) >> (VLAN_PRIO_SHIFT + 1) & 3);
+               if (veth->h_vlan_proto == htons(ETH_P_8021Q))
+                       return ~ntohs(veth->h_vlan_TCI) >>
+                              (VLAN_PRIO_SHIFT + 1) & 3;
                break;
        default:
                break;
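
For the QETH_PRIO_Q_ING_PREC case above, the expression ~tos >> 6 & 3
extracts the top two TOS bits (the high-order bits of the IP precedence
field) and inverts them, so higher-precedence traffic lands on a
lower-numbered, higher-priority queue: precedence bits p map to queue
3 - p. A quick stand-alone check (illustrative; like the kernel, it
relies on arithmetic right shift of the promoted ~tos):

    #include <assert.h>

    int main(void)
    {
        unsigned int p;

        /* ~tos >> 6 & 3 maps the top two TOS bits p to queue 3 - p. */
        for (p = 0; p < 4; p++) {
            unsigned char tos = p << 6;

            assert((~tos >> 6 & 3) == 3 - p);
        }
        return 0;
    }
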
@@ -3860,11 +3850,13 @@ static void __qeth_fill_buffer(struct sk_buff *skb,
  *             from qeth_core_header_cache.
  * @offset:    when mapping the skb, start at skb->data + offset
  * @hd_len:    if > 0, build a dedicated header element of this size
+ * @flush:     prepare the buffer to be flushed, regardless of its fill level
  */
 static int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
                            struct qeth_qdio_out_buffer *buf,
                            struct sk_buff *skb, struct qeth_hdr *hdr,
-                           unsigned int offset, unsigned int hd_len)
+                           unsigned int offset, unsigned int hd_len,
+                           bool flush)
 {
        struct qdio_buffer *buffer = buf->buffer;
        bool is_first_elem = true;
@@ -3893,8 +3885,8 @@ static int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
 
                QETH_TXQ_STAT_INC(queue, skbs_pack);
                /* If the buffer still has free elements, keep using it. */
-               if (buf->next_element_to_fill <
-                   QETH_MAX_BUFFER_ELEMENTS(queue->card))
+               if (!flush && buf->next_element_to_fill <
+                             QETH_MAX_BUFFER_ELEMENTS(queue->card))
                        return 0;
        }
 
@@ -3911,15 +3903,31 @@ static int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue,
 {
        int index = queue->next_buf_to_fill;
        struct qeth_qdio_out_buffer *buffer = queue->bufs[index];
+       struct netdev_queue *txq;
+       bool stopped = false;
 
-       /*
-        * check if buffer is empty to make sure that we do not 'overtake'
-        * ourselves and try to fill a buffer that is already primed
+       /* Just a sanity check; the wake/stop logic should ensure that we always
+        * get a free buffer.
         */
        if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
                return -EBUSY;
-       qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len);
+
+       txq = netdev_get_tx_queue(queue->card->dev, skb_get_queue_mapping(skb));
+
+       if (atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
+               /* If a TX completion happens right _here_ and fails to wake
+                * the txq, then our re-check below will catch the race.
+                */
+               QETH_TXQ_STAT_INC(queue, stopped);
+               netif_tx_stop_queue(txq);
+               stopped = true;
+       }
+
+       qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len, stopped);
        qeth_flush_buffers(queue, index, 1);
+
+       if (stopped && !qeth_out_queue_is_full(queue))
+               netif_tx_start_queue(txq);
        return 0;
 }
 
@@ -3929,6 +3937,8 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
                        int elements_needed)
 {
        struct qeth_qdio_out_buffer *buffer;
+       struct netdev_queue *txq;
+       bool stopped = false;
        int start_index;
        int flush_count = 0;
        int do_pack = 0;
@@ -3940,14 +3950,17 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
                              QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
        start_index = queue->next_buf_to_fill;
        buffer = queue->bufs[queue->next_buf_to_fill];
-       /*
-        * check if buffer is empty to make sure that we do not 'overtake'
-        * ourselves and try to fill a buffer that is already primed
+
+       /* Just a sanity check; the wake/stop logic should ensure that we always
+        * get a free buffer.
         */
        if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
                atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
                return -EBUSY;
        }
+
+       txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
+
        /* check if we need to switch packing state of this queue */
        qeth_switch_to_packing_if_needed(queue);
        if (queue->do_pack) {
@@ -3962,8 +3975,8 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
                                (queue->next_buf_to_fill + 1) %
                                QDIO_MAX_BUFFERS_PER_Q;
                        buffer = queue->bufs[queue->next_buf_to_fill];
-                       /* we did a step forward, so check buffer state
-                        * again */
+
+                       /* We stepped forward, so sanity-check again: */
                        if (atomic_read(&buffer->state) !=
                            QETH_QDIO_BUF_EMPTY) {
                                qeth_flush_buffers(queue, start_index,
@@ -3976,8 +3989,18 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
                }
        }
 
-       flush_count += qeth_fill_buffer(queue, buffer, skb, hdr, offset,
-                                       hd_len);
+       if (buffer->next_element_to_fill == 0 &&
+           atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
+               /* If a TX completion happens right _here_ and fails to wake
+                * the txq, then our re-check below will catch the race.
+                */
+               QETH_TXQ_STAT_INC(queue, stopped);
+               netif_tx_stop_queue(txq);
+               stopped = true;
+       }
+
+       flush_count += qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len,
+                                       stopped);
        if (flush_count)
                qeth_flush_buffers(queue, start_index, flush_count);
        else if (!atomic_read(&queue->set_pci_flags_count))
@@ -4008,6 +4031,8 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
        if (do_pack)
                QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count);
 
+       if (stopped && !qeth_out_queue_is_full(queue))
+               netif_tx_start_queue(txq);
        return rc;
 }
 EXPORT_SYMBOL_GPL(qeth_do_send_packet);
@@ -4094,9 +4119,6 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
        } else {
                if (!push_len)
                        kmem_cache_free(qeth_core_header_cache, hdr);
-               if (rc == -EBUSY)
-                       /* roll back to ETH header */
-                       skb_pull(skb, push_len);
        }
        return rc;
 }
@@ -4341,7 +4363,6 @@ void qeth_tx_timeout(struct net_device *dev)
 
        card = dev->ml_priv;
        QETH_CARD_TEXT(card, 4, "txtimeo");
-       QETH_CARD_STAT_INC(card, tx_errors);
        qeth_schedule_recovery(card);
 }
 EXPORT_SYMBOL_GPL(qeth_tx_timeout);
@@ -4930,7 +4951,7 @@ static void qeth_core_free_card(struct qeth_card *card)
        qeth_clean_channel(&card->write);
        qeth_clean_channel(&card->data);
        destroy_workqueue(card->event_wq);
-       qeth_free_qdio_buffers(card);
+       qeth_free_qdio_queues(card);
        unregister_service_level(&card->qeth_service_level);
        dev_set_drvdata(&card->gdev->dev, NULL);
        kfree(card);
@@ -4979,7 +5000,9 @@ int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
 
        QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
        atomic_set(&card->force_alloc_skb, 0);
-       qeth_update_from_chp_desc(card);
+       rc = qeth_update_from_chp_desc(card);
+       if (rc)
+               return rc;
 retry:
        if (retries < 3)
                QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n",
@@ -5557,13 +5580,17 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
 
        switch (card->info.type) {
        case QETH_CARD_TYPE_IQD:
-               dev = alloc_netdev(0, "hsi%d", NET_NAME_UNKNOWN, ether_setup);
+               dev = alloc_netdev_mqs(0, "hsi%d", NET_NAME_UNKNOWN,
+                                      ether_setup, QETH_MAX_QUEUES, 1);
+               break;
+       case QETH_CARD_TYPE_OSM:
+               dev = alloc_etherdev(0);
                break;
        case QETH_CARD_TYPE_OSN:
                dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN, ether_setup);
                break;
        default:
-               dev = alloc_etherdev(0);
+               dev = alloc_etherdev_mqs(0, QETH_MAX_QUEUES, 1);
        }
 
        if (!dev)
@@ -5585,8 +5612,10 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
                dev->priv_flags &= ~IFF_TX_SKB_SHARING;
                dev->hw_features |= NETIF_F_SG;
                dev->vlan_features |= NETIF_F_SG;
-               if (IS_IQD(card))
+               if (IS_IQD(card)) {
+                       netif_set_real_num_tx_queues(dev, QETH_IQD_MIN_TXQ);
                        dev->features |= NETIF_F_SG;
+               }
        }
 
        return dev;
@@ -5636,14 +5665,16 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
        }
 
        qeth_setup_card(card);
-       qeth_update_from_chp_desc(card);
-
        card->dev = qeth_alloc_netdev(card);
        if (!card->dev) {
                rc = -ENOMEM;
                goto err_card;
        }
 
+       card->qdio.no_out_queues = card->dev->num_tx_queues;
+       rc = qeth_update_from_chp_desc(card);
+       if (rc)
+               goto err_chp_desc;
        qeth_determine_capabilities(card);
        enforced_disc = qeth_enforce_discipline(card);
        switch (enforced_disc) {
@@ -5670,6 +5701,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
 err_disc:
        qeth_core_free_discipline(card);
 err_load:
+err_chp_desc:
        free_netdev(card->dev);
 err_card:
        qeth_core_free_card(card);
@@ -5732,7 +5764,7 @@ static void qeth_core_shutdown(struct ccwgroup_device *gdev)
        if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
                qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
        qeth_qdio_clear_card(card, 0);
-       qeth_clear_qdio_buffers(card);
+       qeth_drain_output_queues(card);
        qdio_free(CARD_DDEV(card));
 }
 
@@ -6188,7 +6220,6 @@ void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
        stats->rx_errors = card->stats.rx_errors;
        stats->rx_dropped = card->stats.rx_dropped;
        stats->multicast = card->stats.rx_multicast;
-       stats->tx_errors = card->stats.tx_errors;
 
        for (i = 0; i < card->qdio.no_out_queues; i++) {
                queue = card->qdio.out_qs[i];
@@ -6201,6 +6232,15 @@ void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 }
 EXPORT_SYMBOL_GPL(qeth_get_stats64);
 
+u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
+                         u8 cast_type, struct net_device *sb_dev)
+{
+       if (cast_type != RTN_UNICAST)
+               return QETH_IQD_MCAST_TXQ;
+       return QETH_IQD_MIN_UCAST_TXQ;
+}
+EXPORT_SYMBOL_GPL(qeth_iqd_select_queue);
+
 int qeth_open(struct net_device *dev)
 {
        struct qeth_card *card = dev->ml_priv;
@@ -6211,7 +6251,7 @@ int qeth_open(struct net_device *dev)
                return -EIO;
 
        card->data.state = CH_STATE_UP;
-       netif_start_queue(dev);
+       netif_tx_start_all_queues(dev);
 
        napi_enable(&card->napi);
        local_bh_disable();
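
Taken together, the qeth_core changes above replace the old
stop-the-whole-device transmit policy (netif_stop_queue() around every
xmit, netif_wake_queue() on completion) with the standard per-txq
flow-control pattern: the xmit path stops its own txq when the buffer
ring fills and then re-checks the fill level to close the race against
a completion that fired in between, while the completion path wakes the
txq once buffers drain. A generic sketch of the pairing, with the
ring-accounting helpers left hypothetical:

    #include <linux/netdevice.h>

    /* Hypothetical ring accounting, standing in for used_buffers above. */
    static bool ring_claim_slot_filled_ring(void);
    static bool ring_is_full(void);
    static void ring_reclaim_completed(void);
    static void post_buffer_to_hw(struct sk_buff *skb);

    /* Producer (xmit): stop the txq when the ring fills, then re-check,
     * since a completion may have freed space in between.
     */
    static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
    {
        struct netdev_queue *txq;
        bool stopped = false;

        txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

        if (ring_claim_slot_filled_ring()) {
            netif_tx_stop_queue(txq);
            stopped = true;
        }

        post_buffer_to_hw(skb);

        if (stopped && !ring_is_full())
            netif_tx_start_queue(txq);
        return NETDEV_TX_OK;
    }

    /* Consumer (TX completion): wake the txq once space has drained. */
    static void example_tx_done(struct net_device *dev, u16 hw_queue)
    {
        struct netdev_queue *txq = netdev_get_tx_queue(dev, hw_queue);

        ring_reclaim_completed();
        if (netif_tx_queue_stopped(txq) && !ring_is_full())
            netif_tx_wake_queue(txq);
    }
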
index 56deeb6f7bc0746c13db3b9b0745c1078e6aeb24..cea4a0bbc303cb5d7b3f5027ccf1e505d23c52fe 100644 (file)
@@ -198,6 +198,9 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev,
        if (!card)
                return -EINVAL;
 
+       if (IS_IQD(card))
+               return -EOPNOTSUPP;
+
        mutex_lock(&card->conf_mutex);
        if (card->state != CARD_STATE_DOWN) {
                rc = -EPERM;
@@ -239,10 +242,6 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev,
                card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
                card->qdio.default_out_queue = 2;
        } else if (sysfs_streq(buf, "no_prio_queueing:3")) {
-               if (card->info.type == QETH_CARD_TYPE_IQD) {
-                       rc = -EPERM;
-                       goto out;
-               }
                card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
                card->qdio.default_out_queue = 3;
        } else if (sysfs_streq(buf, "no_prio_queueing")) {
index 93a53fed4cf8908698592988691390a4e8ea2203..4166eb29f0bdd9fcbfa94f7d654f81c0028a192c 100644 (file)
@@ -38,6 +38,7 @@ static const struct qeth_stats txq_stats[] = {
        QETH_TXQ_STAT("linearized+error skbs", skbs_linearized_fail),
        QETH_TXQ_STAT("TSO bytes", tso_bytes),
        QETH_TXQ_STAT("Packing mode switches", packing_mode_switch),
+       QETH_TXQ_STAT("Queue stopped", stopped),
 };
 
 static const struct qeth_stats card_stats[] = {
@@ -154,6 +155,21 @@ static void qeth_get_drvinfo(struct net_device *dev,
                 CARD_RDEV_ID(card), CARD_WDEV_ID(card), CARD_DDEV_ID(card));
 }
 
+static void qeth_get_channels(struct net_device *dev,
+                             struct ethtool_channels *channels)
+{
+       struct qeth_card *card = dev->ml_priv;
+
+       channels->max_rx = dev->num_rx_queues;
+       channels->max_tx = card->qdio.no_out_queues;
+       channels->max_other = 0;
+       channels->max_combined = 0;
+       channels->rx_count = dev->real_num_rx_queues;
+       channels->tx_count = dev->real_num_tx_queues;
+       channels->other_count = 0;
+       channels->combined_count = 0;
+}
+
 /* Helper function to fill 'advertising' and 'supported' which are the same. */
 /* Autoneg and full-duplex are supported and advertised unconditionally.     */
 /* Always advertise and support all speeds up to specified, and only one     */
@@ -359,6 +375,7 @@ const struct ethtool_ops qeth_ethtool_ops = {
        .get_ethtool_stats = qeth_get_ethtool_stats,
        .get_sset_count = qeth_get_sset_count,
        .get_drvinfo = qeth_get_drvinfo,
+       .get_channels = qeth_get_channels,
        .get_link_ksettings = qeth_get_link_ksettings,
 };
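
With .get_channels wired up, the queue layout becomes visible to
userspace. On an IQD device brought up with the defaults above (four
backing queues, QETH_IQD_MIN_TXQ of them active), ethtool -l output
would plausibly look like this (values illustrative):

    $ ethtool -l hsi0
    Channel parameters for hsi0:
    Pre-set maximums:
    RX:             1
    TX:             4
    Other:          0
    Combined:       0
    Current hardware settings:
    RX:             1
    TX:             2
    Other:          0
    Combined:       0
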
 
index 5549c66c6b5da5227806cf40b710db6e770f4930..e26a6dff286fd134401089f91f7214baa58730de 100644 (file)
@@ -161,10 +161,8 @@ static void qeth_l2_drain_rx_mode_cache(struct qeth_card *card)
        }
 }
 
-static int qeth_l2_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
+static int qeth_l2_get_cast_type(struct sk_buff *skb)
 {
-       if (card->info.type == QETH_CARD_TYPE_OSN)
-               return RTN_UNICAST;
        if (is_broadcast_ether_addr(skb->data))
                return RTN_BROADCAST;
        if (is_multicast_ether_addr(skb->data))
@@ -299,7 +297,7 @@ static void qeth_l2_stop_card(struct qeth_card *card)
        }
        if (card->state == CARD_STATE_HARDSETUP) {
                qeth_qdio_clear_card(card, 0);
-               qeth_clear_qdio_buffers(card);
+               qeth_drain_output_queues(card);
                qeth_clear_working_pool_list(card);
                card->state = CARD_STATE_DOWN;
        }
@@ -603,37 +601,44 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
                                           struct net_device *dev)
 {
        struct qeth_card *card = dev->ml_priv;
-       int cast_type = qeth_l2_get_cast_type(card, skb);
-       int ipv = qeth_get_ip_version(skb);
+       u16 txq = skb_get_queue_mapping(skb);
        struct qeth_qdio_out_q *queue;
        int tx_bytes = skb->len;
        int rc;
 
-       queue = qeth_get_tx_queue(card, skb, ipv, cast_type);
-
-       netif_stop_queue(dev);
+       if (IS_IQD(card))
+               txq = qeth_iqd_translate_txq(dev, txq);
+       queue = card->qdio.out_qs[txq];
 
        if (IS_OSN(card))
                rc = qeth_l2_xmit_osn(card, skb, queue);
        else
-               rc = qeth_xmit(card, skb, queue, ipv, cast_type,
-                              qeth_l2_fill_header);
+               rc = qeth_xmit(card, skb, queue, qeth_get_ip_version(skb),
+                              qeth_l2_get_cast_type(skb), qeth_l2_fill_header);
 
        if (!rc) {
                QETH_TXQ_STAT_INC(queue, tx_packets);
                QETH_TXQ_STAT_ADD(queue, tx_bytes, tx_bytes);
-               netif_wake_queue(dev);
                return NETDEV_TX_OK;
-       } else if (rc == -EBUSY) {
-               return NETDEV_TX_BUSY;
-       } /* else fall through */
+       }
 
        QETH_TXQ_STAT_INC(queue, tx_dropped);
        kfree_skb(skb);
-       netif_wake_queue(dev);
        return NETDEV_TX_OK;
 }
 
+static u16 qeth_l2_select_queue(struct net_device *dev, struct sk_buff *skb,
+                               struct net_device *sb_dev)
+{
+       struct qeth_card *card = dev->ml_priv;
+
+       if (IS_IQD(card))
+               return qeth_iqd_select_queue(dev, skb,
+                                            qeth_l2_get_cast_type(skb),
+                                            sb_dev);
+       return qeth_get_priority_queue(card, skb);
+}
+
 static const struct device_type qeth_l2_devtype = {
        .name = "qeth_layer2",
        .groups = qeth_l2_attr_groups,
@@ -687,6 +692,7 @@ static const struct net_device_ops qeth_l2_netdev_ops = {
        .ndo_get_stats64        = qeth_get_stats64,
        .ndo_start_xmit         = qeth_l2_hard_start_xmit,
        .ndo_features_check     = qeth_features_check,
+       .ndo_select_queue       = qeth_l2_select_queue,
        .ndo_validate_addr      = qeth_l2_validate_addr,
        .ndo_set_rx_mode        = qeth_l2_set_rx_mode,
        .ndo_do_ioctl           = qeth_do_ioctl,
index f804d27eb569172c5d58d19faa1e320aedb1f089..4c939410513842d4a9fbb211084957afdca870af 100644 (file)
@@ -1433,7 +1433,7 @@ static void qeth_l3_stop_card(struct qeth_card *card)
        }
        if (card->state == CARD_STATE_HARDSETUP) {
                qeth_qdio_clear_card(card, 0);
-               qeth_clear_qdio_buffers(card);
+               qeth_drain_output_queues(card);
                qeth_clear_working_pool_list(card);
                card->state = CARD_STATE_DOWN;
        }
@@ -2036,7 +2036,6 @@ static void qeth_l3_fixup_headers(struct sk_buff *skb)
 static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
                        struct qeth_qdio_out_q *queue, int ipv, int cast_type)
 {
-       unsigned char eth_hdr[ETH_HLEN];
        unsigned int hw_hdr_len;
        int rc;
 
@@ -2046,45 +2045,44 @@ static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
        rc = skb_cow_head(skb, hw_hdr_len - ETH_HLEN);
        if (rc)
                return rc;
-       skb_copy_from_linear_data(skb, eth_hdr, ETH_HLEN);
        skb_pull(skb, ETH_HLEN);
 
        qeth_l3_fixup_headers(skb);
-       rc = qeth_xmit(card, skb, queue, ipv, cast_type, qeth_l3_fill_header);
-       if (rc == -EBUSY) {
-               /* roll back to ETH header */
-               skb_push(skb, ETH_HLEN);
-               skb_copy_to_linear_data(skb, eth_hdr, ETH_HLEN);
-       }
-       return rc;
+       return qeth_xmit(card, skb, queue, ipv, cast_type, qeth_l3_fill_header);
 }
 
 static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
                                           struct net_device *dev)
 {
-       int cast_type = qeth_l3_get_cast_type(skb);
        struct qeth_card *card = dev->ml_priv;
+       u16 txq = skb_get_queue_mapping(skb);
        int ipv = qeth_get_ip_version(skb);
        struct qeth_qdio_out_q *queue;
        int tx_bytes = skb->len;
-       int rc;
-
-       queue = qeth_get_tx_queue(card, skb, ipv, cast_type);
+       int cast_type, rc;
 
        if (IS_IQD(card)) {
+               queue = card->qdio.out_qs[qeth_iqd_translate_txq(dev, txq)];
+
                if (card->options.sniffer)
                        goto tx_drop;
                if ((card->options.cq != QETH_CQ_ENABLED && !ipv) ||
                    (card->options.cq == QETH_CQ_ENABLED &&
                     skb->protocol != htons(ETH_P_AF_IUCV)))
                        goto tx_drop;
+
+               if (txq == QETH_IQD_MCAST_TXQ)
+                       cast_type = qeth_l3_get_cast_type(skb);
+               else
+                       cast_type = RTN_UNICAST;
+       } else {
+               queue = card->qdio.out_qs[txq];
+               cast_type = qeth_l3_get_cast_type(skb);
        }
 
        if (cast_type == RTN_BROADCAST && !card->info.broadcast_capable)
                goto tx_drop;
 
-       netif_stop_queue(dev);
-
        if (ipv == 4 || IS_IQD(card))
                rc = qeth_l3_xmit(card, skb, queue, ipv, cast_type);
        else
@@ -2094,16 +2092,12 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
        if (!rc) {
                QETH_TXQ_STAT_INC(queue, tx_packets);
                QETH_TXQ_STAT_ADD(queue, tx_bytes, tx_bytes);
-               netif_wake_queue(dev);
                return NETDEV_TX_OK;
-       } else if (rc == -EBUSY) {
-               return NETDEV_TX_BUSY;
-       } /* else fall through */
+       }
 
 tx_drop:
        QETH_TXQ_STAT_INC(queue, tx_dropped);
        kfree_skb(skb);
-       netif_wake_queue(dev);
        return NETDEV_TX_OK;
 }
 
@@ -2147,11 +2141,27 @@ static netdev_features_t qeth_l3_osa_features_check(struct sk_buff *skb,
        return qeth_features_check(skb, dev, features);
 }
 
+static u16 qeth_l3_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
+                                   struct net_device *sb_dev)
+{
+       return qeth_iqd_select_queue(dev, skb, qeth_l3_get_cast_type(skb),
+                                    sb_dev);
+}
+
+static u16 qeth_l3_osa_select_queue(struct net_device *dev, struct sk_buff *skb,
+                                   struct net_device *sb_dev)
+{
+       struct qeth_card *card = dev->ml_priv;
+
+       return qeth_get_priority_queue(card, skb);
+}
+
 static const struct net_device_ops qeth_l3_netdev_ops = {
        .ndo_open               = qeth_open,
        .ndo_stop               = qeth_stop,
        .ndo_get_stats64        = qeth_get_stats64,
        .ndo_start_xmit         = qeth_l3_hard_start_xmit,
+       .ndo_select_queue       = qeth_l3_iqd_select_queue,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_rx_mode        = qeth_l3_set_rx_mode,
        .ndo_do_ioctl           = qeth_do_ioctl,
@@ -2168,6 +2178,7 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
        .ndo_get_stats64        = qeth_get_stats64,
        .ndo_start_xmit         = qeth_l3_hard_start_xmit,
        .ndo_features_check     = qeth_l3_osa_features_check,
+       .ndo_select_queue       = qeth_l3_osa_select_queue,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_rx_mode        = qeth_l3_set_rx_mode,
        .ndo_do_ioctl           = qeth_do_ioctl,
index 744a64680d5b0d16c982012bfe2b351becd54a9a..e8fc28dba8dfc3521532c3c87d26d199b8ed9b6c 100644 (file)
@@ -624,6 +624,20 @@ static void zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action)
        add_timer(&erp_action->timer);
 }
 
+void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter,
+                                    int clear, char *dbftag)
+{
+       unsigned long flags;
+       struct zfcp_port *port;
+
+       write_lock_irqsave(&adapter->erp_lock, flags);
+       read_lock(&adapter->port_list_lock);
+       list_for_each_entry(port, &adapter->port_list, list)
+               _zfcp_erp_port_forced_reopen(port, clear, dbftag);
+       read_unlock(&adapter->port_list_lock);
+       write_unlock_irqrestore(&adapter->erp_lock, flags);
+}
+
 static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
                                      int clear, char *dbftag)
 {
@@ -1341,6 +1355,9 @@ static void zfcp_erp_try_rport_unblock(struct zfcp_port *port)
                struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev);
                int lun_status;
 
+               if (sdev->sdev_state == SDEV_DEL ||
+                   sdev->sdev_state == SDEV_CANCEL)
+                       continue;
                if (zsdev->port != port)
                        continue;
                /* LUN under port of interest */
index 3fce47b0b21b55142a64bb3b838bf28168ddd89e..c6acca521ffec71ee7b3f7e7231a32b18fdceff7 100644 (file)
@@ -70,6 +70,8 @@ extern void zfcp_erp_port_reopen(struct zfcp_port *port, int clear,
                                 char *dbftag);
 extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *);
 extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *);
+extern void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter,
+                                           int clear, char *dbftag);
 extern void zfcp_erp_set_lun_status(struct scsi_device *, u32);
 extern void zfcp_erp_clear_lun_status(struct scsi_device *, u32);
 extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *);
index db00b5e3abbe361143c83dc5d6becfaa0e62aac0..33eddb02ee300238897f0f9018119717b387fd58 100644 (file)
@@ -239,10 +239,6 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
        list_for_each_entry(port, &adapter->port_list, list) {
                if ((port->d_id & range) == (ntoh24(page->rscn_fid) & range))
                        zfcp_fc_test_link(port);
-               if (!port->d_id)
-                       zfcp_erp_port_reopen(port,
-                                            ZFCP_STATUS_COMMON_ERP_FAILED,
-                                            "fcrscn1");
        }
        read_unlock_irqrestore(&adapter->port_list_lock, flags);
 }
@@ -250,6 +246,7 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
 static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
 {
        struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data;
+       struct zfcp_adapter *adapter = fsf_req->adapter;
        struct fc_els_rscn *head;
        struct fc_els_rscn_page *page;
        u16 i;
@@ -263,6 +260,22 @@ static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
        no_entries = be16_to_cpu(head->rscn_plen) /
                sizeof(struct fc_els_rscn_page);
 
+       if (no_entries > 1) {
+               /* handle failed ports */
+               unsigned long flags;
+               struct zfcp_port *port;
+
+               read_lock_irqsave(&adapter->port_list_lock, flags);
+               list_for_each_entry(port, &adapter->port_list, list) {
+                       if (port->d_id)
+                               continue;
+                       zfcp_erp_port_reopen(port,
+                                            ZFCP_STATUS_COMMON_ERP_FAILED,
+                                            "fcrscn1");
+               }
+               read_unlock_irqrestore(&adapter->port_list_lock, flags);
+       }
+
        for (i = 1; i < no_entries; i++) {
                /* skip head and start with 1st element */
                page++;
index f4f6a07c52220234fb0e865ca3f0d87a2d2fdbe0..221d0dfb849329eb5ebf1758004628301b500ba8 100644 (file)
@@ -368,6 +368,10 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
        struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
        int ret = SUCCESS, fc_ret;
 
+       if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE)) {
+               zfcp_erp_port_forced_reopen_all(adapter, 0, "schrh_p");
+               zfcp_erp_wait(adapter);
+       }
        zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
        zfcp_erp_wait(adapter);
        fc_ret = fc_block_scsi_eh(scpnt);
index 1df5171594b89dc70087def629200c30b4149d1f..11fb68d7e60de6ed5ab388250691b647cbc030bc 100644 (file)
@@ -2640,9 +2640,14 @@ static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor)
        return capacity;
 }
 
+static inline int aac_pci_offline(struct aac_dev *dev)
+{
+       return pci_channel_offline(dev->pdev) || dev->handle_pci_error;
+}
+
 static inline int aac_adapter_check_health(struct aac_dev *dev)
 {
-       if (unlikely(pci_channel_offline(dev->pdev)))
+       if (unlikely(aac_pci_offline(dev)))
                return -1;
 
        return (dev)->a_ops.adapter_check_health(dev);
index e67e032936ef015b66c242eaf9c3111cfb3812c2..78430a7b294c6e651024300d86aaec5eecbe53c4 100644 (file)
@@ -672,7 +672,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
                                        return -ETIMEDOUT;
                                }
 
-                               if (unlikely(pci_channel_offline(dev->pdev)))
+                               if (unlikely(aac_pci_offline(dev)))
                                        return -EFAULT;
 
                                if ((blink = aac_adapter_check_health(dev)) > 0) {
@@ -772,7 +772,7 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
 
                spin_unlock_irqrestore(&fibptr->event_lock, flags);
 
-               if (unlikely(pci_channel_offline(dev->pdev)))
+               if (unlikely(aac_pci_offline(dev)))
                        return -EFAULT;
 
                fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
index 462560b2855e25e1204064c89350fed5653bf36a..469d0bc9f5fe4db6e756e4270bf522e63cd15566 100644 (file)
@@ -1713,8 +1713,11 @@ csio_scsi_err_handler(struct csio_hw *hw, struct csio_ioreq *req)
        }
 
 out:
-       if (req->nsge > 0)
+       if (req->nsge > 0) {
                scsi_dma_unmap(cmnd);
+               if (req->dcopy && (host_status == DID_OK))
+                       host_status = csio_scsi_copy_to_sgl(hw, req);
+       }
 
        cmnd->result = (((host_status) << 16) | scsi_status);
        cmnd->scsi_done(cmnd);
index dbaa4f131433abde497c843c17e2b0e677b4544e..3ad997ac351034bd2e556a17c6f9d14addf2921a 100644 (file)
@@ -139,6 +139,7 @@ static const struct {
        { IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" },
 
        { IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" },
+       { IBMVFC_FC_SCSI_ERROR, IBMVFC_COMMAND_FAILED, DID_ERROR, 0, 1, "PRLI to device failed." },
 };
 
 static void ibmvfc_npiv_login(struct ibmvfc_host *);
@@ -1494,9 +1495,9 @@ static void ibmvfc_log_error(struct ibmvfc_event *evt)
        if (rsp->flags & FCP_RSP_LEN_VALID)
                rsp_code = rsp->data.info.rsp_code;
 
-       scmd_printk(KERN_ERR, cmnd, "Command (%02X) failed: %s (%x:%x) "
+       scmd_printk(KERN_ERR, cmnd, "Command (%02X) : %s (%x:%x) "
                    "flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n",
-                   cmnd->cmnd[0], err, vfc_cmd->status, vfc_cmd->error,
+                   cmnd->cmnd[0], err, be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error),
                    rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status);
 }
 
@@ -2022,7 +2023,7 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
                sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) "
                            "flags: %x fcp_rsp: %x, scsi_status: %x\n", desc,
                            ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
-                           rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
+                           be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
                            fc_rsp->scsi_status);
                rsp_rc = -EIO;
        } else
@@ -2381,7 +2382,7 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
                sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) "
                            "flags: %x fcp_rsp: %x, scsi_status: %x\n",
                            ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
-                           rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
+                           be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
                            fc_rsp->scsi_status);
                rsp_rc = -EIO;
        } else
@@ -2755,16 +2756,18 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
                ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
                if (crq->format == IBMVFC_PARTITION_MIGRATED) {
                        /* We need to re-setup the interpartition connection */
-                       dev_info(vhost->dev, "Re-enabling adapter\n");
+                       dev_info(vhost->dev, "Partition migrated, Re-enabling adapter\n");
                        vhost->client_migrated = 1;
                        ibmvfc_purge_requests(vhost, DID_REQUEUE);
                        ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
                        ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE);
-               } else {
-                       dev_err(vhost->dev, "Virtual adapter failed (rc=%d)\n", crq->format);
+               } else if (crq->format == IBMVFC_PARTNER_FAILED || crq->format == IBMVFC_PARTNER_DEREGISTER) {
+                       dev_err(vhost->dev, "Host partner adapter deregistered or failed (rc=%d)\n", crq->format);
                        ibmvfc_purge_requests(vhost, DID_ERROR);
                        ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
                        ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
+               } else {
+                       dev_err(vhost->dev, "Received unknown transport event from partner (rc=%d)\n", crq->format);
                }
                return;
        case IBMVFC_CRQ_CMD_RSP:
@@ -3348,7 +3351,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
 
                tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
                        ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
-                       rsp->status, rsp->error, status);
+                       be16_to_cpu(rsp->status), be16_to_cpu(rsp->error), status);
                break;
        }
 
@@ -3446,9 +3449,10 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
                        ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
 
                tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
-                       ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), rsp->status, rsp->error,
-                       ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), rsp->fc_type,
-                       ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), rsp->fc_explain, status);
+                       ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
+                                            be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
+                       ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
+                       ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain), status);
                break;
        }
 
@@ -3619,7 +3623,7 @@ static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
                fc_explain = (be32_to_cpu(mad->fc_iu.response[1]) & 0x0000ff00) >> 8;
                tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
                         ibmvfc_get_cmd_error(be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error)),
-                        mad->iu.status, mad->iu.error,
+                        be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error),
                         ibmvfc_get_fc_type(fc_reason), fc_reason,
                         ibmvfc_get_ls_explain(fc_explain), fc_explain, status);
                break;
@@ -3831,9 +3835,10 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
 
                tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
                        ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
-                       rsp->status, rsp->error, ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)),
-                       rsp->fc_type, ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)),
-                       rsp->fc_explain, status);
+                       be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
+                       ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
+                       ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain),
+                       status);
                break;
        }
 
@@ -3959,7 +3964,7 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
                level += ibmvfc_retry_host_init(vhost);
                ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n",
                           ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
-                          rsp->status, rsp->error);
+                          be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
                break;
        case IBMVFC_MAD_DRIVER_FAILED:
                break;
@@ -4024,7 +4029,7 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
                        ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
                ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n",
                           ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
-                                               rsp->status, rsp->error);
+                                               be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
                ibmvfc_free_event(evt);
                return;
        case IBMVFC_MAD_CRQ_ERROR:
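
The recurring ibmvfc change above is a single bug class: the firmware
response fields are big-endian (__be16), and the old trace messages
passed them to the format string raw, so on little-endian hosts
(ppc64le) the hex values printed byte-swapped next to the correctly
decoded error text. The fix converts at the point of formatting; a
sketch of the pattern (illustrative, not the driver's code):

    #include <linux/kernel.h>
    #include <asm/byteorder.h>

    /* Illustrative: a wire value of 0x0102 held in a __be16 prints as
     * 0x201 on a little-endian host unless it is converted first.
     */
    static void log_vfc_status(__be16 status, __be16 error)
    {
        /* wrong: raw big-endian bytes formatted as host-endian */
        pr_err("status %x error %x\n", status, error);

        /* right: convert to CPU endianness before formatting */
        pr_err("status %x error %x\n",
               be16_to_cpu(status), be16_to_cpu(error));
    }
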
index b81a53c4a9a8b1020a96a85fd5e84cde2adb9900..459cc288ba1d01abe63c28454bf73c7190bb64a4 100644 (file)
@@ -78,9 +78,14 @@ enum ibmvfc_crq_valid {
        IBMVFC_CRQ_XPORT_EVENT          = 0xFF,
 };
 
-enum ibmvfc_crq_format {
+enum ibmvfc_crq_init_msg {
        IBMVFC_CRQ_INIT                 = 0x01,
        IBMVFC_CRQ_INIT_COMPLETE        = 0x02,
+};
+
+enum ibmvfc_crq_xport_evts {
+       IBMVFC_PARTNER_FAILED           = 0x01,
+       IBMVFC_PARTNER_DEREGISTER       = 0x02,
        IBMVFC_PARTITION_MIGRATED       = 0x06,
 };
 
index c98f264f1d83a030ea8a00678fd586ab70059218..a497b2c0cb798e07d240affc72b4621fa1503f34 100644 (file)
@@ -3878,10 +3878,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
         * wake up the thread.
         */
        spin_lock(&lpfc_cmd->buf_lock);
-       if (unlikely(lpfc_cmd->cur_iocbq.iocb_flag & LPFC_DRIVER_ABORTED)) {
-               lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
-               if (lpfc_cmd->waitq)
-                       wake_up(lpfc_cmd->waitq);
+       lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
+       if (lpfc_cmd->waitq) {
+               wake_up(lpfc_cmd->waitq);
                lpfc_cmd->waitq = NULL;
        }
        spin_unlock(&lpfc_cmd->buf_lock);
index e57774472e752013ce762912a6ceec512905fc2a..1d8c584ec1e9197595acf2baa61bccae4305b646 100644 (file)
@@ -3281,12 +3281,18 @@ mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
 
        if (smid < ioc->hi_priority_smid) {
                struct scsiio_tracker *st;
+               void *request;
 
                st = _get_st_from_smid(ioc, smid);
                if (!st) {
                        _base_recovery_check(ioc);
                        return;
                }
+
+               /* Clear MPI request frame */
+               request = mpt3sas_base_get_msg_frame(ioc, smid);
+               memset(request, 0, ioc->request_sz);
+
                mpt3sas_base_clear_st(ioc, st);
                _base_recovery_check(ioc);
                return;
index 8bb5b8f9f4d2cdbbc127c73cda4d9672b4adcf77..1ccfbc7eebe0323ce88b1c450e52bb87aba3c45e 100644 (file)
@@ -1462,11 +1462,23 @@ mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
 {
        struct scsi_cmnd *scmd = NULL;
        struct scsiio_tracker *st;
+       Mpi25SCSIIORequest_t *mpi_request;
 
        if (smid > 0  &&
            smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
                u32 unique_tag = smid - 1;
 
+               mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+
+               /*
+                * If a SCSI IO request is outstanding at the driver level,
+                * the DevHandle field must be non-zero. If DevHandle is
+                * zero, this smid is free at the driver level, so return
+                * NULL.
+                */
+               if (!mpi_request->DevHandle)
+                       return scmd;
+
                scmd = scsi_host_find_tag(ioc->shost, unique_tag);
                if (scmd) {
                        st = scsi_cmd_priv(scmd);
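
These two mpt3sas hunks cooperate: mpt3sas_base_free_smid() now zeroes
the MPI request frame, so a zero DevHandle becomes a reliable marker
that the smid is free, and mpt3sas_scsih_scsi_lookup_get() can test it
before resolving the smid through the block layer's tag map. A sketch
of the handshake (illustrative; resolve_tag() and the struct layout are
hypothetical stand-ins):

    #include <linux/string.h>
    #include <linux/types.h>

    struct scsi_cmnd;                           /* opaque here */

    struct example_mpi_request {
        u16 DevHandle;
        /* ... rest of the MPI request frame ... */
    };

    static struct scsi_cmnd *resolve_tag(u16 smid);    /* hypothetical */

    static void example_free_smid(struct example_mpi_request *frame)
    {
        /* Clearing the frame makes DevHandle 0 while the smid is free. */
        memset(frame, 0, sizeof(*frame));
    }

    static struct scsi_cmnd *example_lookup(struct example_mpi_request *frame,
                                            u16 smid)
    {
        if (!frame->DevHandle)      /* smid is free: no outstanding I/O */
            return NULL;
        return resolve_tag(smid);
    }
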
index e74a62448ba466a58c2365546b5d1fc34bafa8e9..e5db9a9954dc0cd015577686c8bdb5efda9f78de 100644 (file)
@@ -1392,10 +1392,8 @@ static void qedi_free_nvm_iscsi_cfg(struct qedi_ctx *qedi)
 
 static int qedi_alloc_nvm_iscsi_cfg(struct qedi_ctx *qedi)
 {
-       struct qedi_nvm_iscsi_image nvm_image;
-
        qedi->iscsi_image = dma_alloc_coherent(&qedi->pdev->dev,
-                                              sizeof(nvm_image),
+                                              sizeof(struct qedi_nvm_iscsi_image),
                                               &qedi->nvm_buf_dma, GFP_KERNEL);
        if (!qedi->iscsi_image) {
                QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n");
@@ -2236,14 +2234,13 @@ static void qedi_boot_release(void *data)
 static int qedi_get_boot_info(struct qedi_ctx *qedi)
 {
        int ret = 1;
-       struct qedi_nvm_iscsi_image nvm_image;
 
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
                  "Get NVM iSCSI CFG image\n");
        ret = qedi_ops->common->nvm_get_image(qedi->cdev,
                                              QED_NVM_IMAGE_ISCSI_CFG,
                                              (char *)qedi->iscsi_image,
-                                             sizeof(nvm_image));
+                                             sizeof(struct qedi_nvm_iscsi_image));
        if (ret)
                QEDI_ERR(&qedi->dbg_ctx,
                         "Could not get NVM image. ret = %d\n", ret);
index 16a18d5d856f91725b33e25df042eb21bba8c20a..6e4f4931ae175f806731d2fcb1fbb4ba655cc885 100644 (file)
@@ -3203,6 +3203,8 @@ static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
        if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
                return -EINVAL;
        ep = iscsi_lookup_endpoint(transport_fd);
+       if (!ep)
+               return -EINVAL;
        conn = cls_conn->dd_data;
        qla_conn = conn->dd_data;
        qla_conn->qla_ep = ep->dd_data;
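
iscsi_lookup_endpoint() can fail, and the old code dereferenced ep->dd_data unconditionally. The fix is the usual guard-the-lookup pattern, sketched here with a hypothetical lookup_endpoint() standing in for the transport call:

#include <stdio.h>
#include <stddef.h>

struct endpoint { void *dd_data; };

/* hypothetical lookup that can fail */
static struct endpoint *lookup_endpoint(unsigned long handle)
{
	(void)handle;
	return NULL;		/* simulate a stale or unknown handle */
}

static int bind_conn(unsigned long transport_fd)
{
	struct endpoint *ep = lookup_endpoint(transport_fd);

	if (!ep)		/* fail early instead of dereferencing NULL */
		return -1;
	/* ... use ep->dd_data ... */
	return 0;
}

int main(void)
{
	printf("bind: %d\n", bind_conn(42));
	return 0;
}
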
index c4cbfd07b9167f0e29b635b9b24e65a6df3826d9..a08ff3bd63105141840e774fc0af3081aa78178a 100644 (file)
@@ -238,6 +238,7 @@ static struct {
        {"NETAPP", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
        {"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
        {"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+       {"LENOVO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
        {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36},
        {"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN},
        {"SONY", "TSL", NULL, BLIST_FORCELUN},          /* DDS3 & DDS4 autoloaders */
index 5a58cbf3a75da9123899ce668934e002933d1416..c14006ac98f91c6bb3c7d360b7bd78720d19df84 100644 (file)
@@ -75,6 +75,7 @@ static const struct scsi_dh_blist scsi_dh_blist[] = {
        {"NETAPP", "INF-01-00",         "rdac", },
        {"LSI", "INF-01-00",            "rdac", },
        {"ENGENIO", "INF-01-00",        "rdac", },
+       {"LENOVO", "DE_Series",         "rdac", },
        {NULL, NULL,                    NULL },
 };
 
index 6a9040faed00c93ba5beeda77ffb5b2cbcb6c07e..3b119ca0cc0ce9ba2cfcc95cf78307a96b1d264b 100644 (file)
@@ -771,6 +771,12 @@ store_state_field(struct device *dev, struct device_attribute *attr,
 
        mutex_lock(&sdev->state_mutex);
        ret = scsi_device_set_state(sdev, state);
+       /*
+        * If the device state changes to SDEV_RUNNING, we need to run
+        * the queue to avoid an I/O hang.
+        */
+       if (ret == 0 && state == SDEV_RUNNING)
+               blk_mq_run_hw_queues(sdev->request_queue, true);
        mutex_unlock(&sdev->state_mutex);
 
        return ret == 0 ? count : -EINVAL;
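
Requests queued while a device sits in a non-running state stay parked until something restarts queue processing, which is why the transition back to SDEV_RUNNING now kicks the hardware queues. A loose user-space analogue (a condition variable standing in for blk_mq_run_hw_queues(), not the blk-mq mechanics):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wake = PTHREAD_COND_INITIALIZER;
static int running;		/* device state: 0 = blocked, 1 = running */
static int pending = 1;		/* one request queued while blocked */

static void *worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!(running && pending))
		pthread_cond_wait(&wake, &lock);
	pending = 0;
	pthread_mutex_unlock(&lock);
	printf("request completed\n");
	return NULL;
}

static void set_state_running(void)
{
	pthread_mutex_lock(&lock);
	running = 1;
	/* without this kick the worker sleeps forever on the old state */
	pthread_cond_broadcast(&wake);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	sleep(1);		/* worker is parked on the condition */
	set_state_running();
	pthread_join(t, NULL);
	return 0;
}
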
index 251db30d0882dc83556a688798c4f277072edefe..2b2bc4b49d78a36c737cd9e70666b900ec0fc2b2 100644 (file)
@@ -1415,11 +1415,6 @@ static void sd_release(struct gendisk *disk, fmode_t mode)
                        scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
        }
 
-       /*
-        * XXX and what if there are packets in flight and this close()
-        * XXX is followed by a "rmmod sd_mod"?
-        */
-
        scsi_disk_put(sdkp);
 }
 
@@ -3076,6 +3071,9 @@ static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
        unsigned int opt_xfer_bytes =
                logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
 
+       if (sdkp->opt_xfer_blocks == 0)
+               return false;
+
        if (sdkp->opt_xfer_blocks > dev_max) {
                sd_first_printk(KERN_WARNING, sdkp,
                                "Optimal transfer size %u logical blocks " \
@@ -3505,9 +3503,21 @@ static void scsi_disk_release(struct device *dev)
 {
        struct scsi_disk *sdkp = to_scsi_disk(dev);
        struct gendisk *disk = sdkp->disk;
-       
+       struct request_queue *q = disk->queue;
+
        ida_free(&sd_index_ida, sdkp->index);
 
+       /*
+        * Wait until all requests that are in progress have completed.
+        * This prevents e.g. scsi_end_request() from crashing when the
+        * disk->private_data pointer is cleared below. Wait from inside
+        * scsi_disk_release() instead of from sd_release() so that
+        * freezing and unfreezing the request queue does not affect user
+        * space I/O when multiple processes open a /dev/sd... node
+        * concurrently.
+        */
+       blk_mq_freeze_queue(q);
+       blk_mq_unfreeze_queue(q);
+
        disk->private_data = NULL;
        put_disk(disk);
        put_device(&sdkp->device->sdev_gendev);
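
blk_mq_freeze_queue() waits until every request in flight has drained, so the freeze/unfreeze pair acts as a pure barrier between outstanding I/O and the teardown below it. A rough user-space analogue of drain-before-free, assuming a simple in-flight counter:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t drained = PTHREAD_COND_INITIALIZER;
static int in_flight;
static int *private_data;	/* freed only after the drain barrier */

static void request_done(void)
{
	pthread_mutex_lock(&lock);
	if (--in_flight == 0)
		pthread_cond_broadcast(&drained);
	pthread_mutex_unlock(&lock);
}

static void *io_thread(void *arg)
{
	(void)arg;
	printf("I/O sees private_data = %d\n", *private_data);
	request_done();
	return NULL;
}

int main(void)
{
	pthread_t t;

	private_data = malloc(sizeof(*private_data));
	*private_data = 42;

	in_flight = 1;
	pthread_create(&t, NULL, io_thread, NULL);

	/* the freeze/unfreeze analogue: wait for in-flight I/O ... */
	pthread_mutex_lock(&lock);
	while (in_flight)
		pthread_cond_wait(&drained, &lock);
	pthread_mutex_unlock(&lock);

	/* ... and only then tear down what the I/O path dereferences */
	free(private_data);
	private_data = NULL;

	pthread_join(t, NULL);
	return 0;
}
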
index 84380bae20f1ec350d5209931cdca018351199f1..8472de1007fffca12f41823e3ed8e45dac1ee06d 100644 (file)
@@ -385,7 +385,7 @@ enum storvsc_request_type {
  * This is the end of Protocol specific defines.
  */
 
-static int storvsc_ringbuffer_size = (256 * PAGE_SIZE);
+static int storvsc_ringbuffer_size = (128 * 1024);
 static u32 max_outstanding_req_per_channel;
 
 static int storvsc_vcpus_per_sub_channel = 4;
@@ -668,13 +668,22 @@ static void  handle_multichannel_storage(struct hv_device *device, int max_chns)
 {
        struct device *dev = &device->device;
        struct storvsc_device *stor_device;
-       int num_cpus = num_online_cpus();
        int num_sc;
        struct storvsc_cmd_request *request;
        struct vstor_packet *vstor_packet;
        int ret, t;
 
-       num_sc = ((max_chns > num_cpus) ? num_cpus : max_chns);
+       /*
+        * If the number of CPUs is artificially restricted, such as
+        * with maxcpus=1 on the kernel boot line, Hyper-V could offer
+        * sub-channels >= the number of CPUs. These sub-channels
+        * should not be created. The primary channel is already created
+        * and assigned to one CPU, so check against # CPUs - 1.
+        */
+       num_sc = min((int)(num_online_cpus() - 1), max_chns);
+       if (!num_sc)
+               return;
+
        stor_device = get_out_stor_device(device);
        if (!stor_device)
                return;
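
The primary channel already occupies one CPU, so at most num_online_cpus() - 1 sub-channels are useful, and under maxcpus=1 that count is zero, hence the early return. The arithmetic on its own:

#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }

static int sub_channels(int online_cpus, int offered)
{
	int num_sc = min_int(online_cpus - 1, offered);

	return num_sc > 0 ? num_sc : 0;	/* 0 means: create none */
}

int main(void)
{
	printf("%d\n", sub_channels(1, 4));	/* maxcpus=1 -> 0 */
	printf("%d\n", sub_channels(8, 4));	/* offer is the limit -> 4 */
	printf("%d\n", sub_channels(4, 8));	/* CPUs are the limit -> 3 */
	return 0;
}
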
index 8af01777d09c74f344ad325256dbd30248febe32..f8cb7c23305b7e984e16ba94406f1f338cb17a46 100644 (file)
@@ -793,6 +793,7 @@ static int virtscsi_probe(struct virtio_device *vdev)
 
        /* We need to know how many queues before we allocate. */
        num_queues = virtscsi_config_get(vdev, num_queues) ? : 1;
+       num_queues = min_t(unsigned int, nr_cpu_ids, num_queues);
 
        num_targets = virtscsi_config_get(vdev, max_target) + 1;
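
The device-supplied num_queues can be arbitrarily large, so it is clamped to nr_cpu_ids with min_t(), which forces both operands to one stated type before comparing. A simplified cousin of that macro (the kernel version adds temporaries and type checking):

#include <stdio.h>

/* simplified cousin of the kernel's min_t(): compare in one stated type */
#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
	unsigned int nr_cpu_ids = 8;
	unsigned int from_config = 128;	/* device may advertise many queues */

	printf("queues used: %u\n",
	       min_t(unsigned int, nr_cpu_ids, from_config));
	return 0;
}
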
 
index 9351349cf0a930cd5c25dedd6bb747970e455e96..1e0041ec813238cbfa7ab52c3fdc9799961169d1 100644 (file)
@@ -150,7 +150,12 @@ struct bcm2835_power {
 
 static int bcm2835_asb_enable(struct bcm2835_power *power, u32 reg)
 {
-       u64 start = ktime_get_ns();
+       u64 start;
+
+       if (!reg)
+               return 0;
+
+       start = ktime_get_ns();
 
        /* Enable the module's async AXI bridges. */
        ASB_WRITE(reg, ASB_READ(reg) & ~ASB_REQ_STOP);
@@ -165,7 +170,12 @@ static int bcm2835_asb_enable(struct bcm2835_power *power, u32 reg)
 
 static int bcm2835_asb_disable(struct bcm2835_power *power, u32 reg)
 {
-       u64 start = ktime_get_ns();
+       u64 start;
+
+       if (!reg)
+               return 0;
+
+       start = ktime_get_ns();
 
        /* Disable the module's async AXI bridges. */
        ASB_WRITE(reg, ASB_READ(reg) | ASB_REQ_STOP);
@@ -475,7 +485,7 @@ static int bcm2835_power_pd_power_off(struct generic_pm_domain *domain)
        }
 }
 
-static void
+static int
 bcm2835_init_power_domain(struct bcm2835_power *power,
                          int pd_xlate_index, const char *name)
 {
@@ -483,6 +493,17 @@ bcm2835_init_power_domain(struct bcm2835_power *power,
        struct bcm2835_power_domain *dom = &power->domains[pd_xlate_index];
 
        dom->clk = devm_clk_get(dev->parent, name);
+       if (IS_ERR(dom->clk)) {
+               int ret = PTR_ERR(dom->clk);
+
+               if (ret == -EPROBE_DEFER)
+                       return ret;
+
+               /* Some domains don't have a clk, so make sure that we
+                * don't deref an error pointer later.
+                */
+               dom->clk = NULL;
+       }
 
        dom->base.name = name;
        dom->base.power_on = bcm2835_power_pd_power_on;
@@ -495,6 +516,8 @@ bcm2835_init_power_domain(struct bcm2835_power *power,
        pm_genpd_init(&dom->base, NULL, true);
 
        power->pd_xlate.domains[pd_xlate_index] = &dom->base;
+
+       return 0;
 }
 
 /** bcm2835_reset_reset - Resets a block that has a reset line in the
@@ -592,7 +615,7 @@ static int bcm2835_power_probe(struct platform_device *pdev)
                { BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_CAM0 },
                { BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_CAM1 },
        };
-       int ret, i;
+       int ret = 0, i;
        u32 id;
 
        power = devm_kzalloc(dev, sizeof(*power), GFP_KERNEL);
@@ -619,8 +642,11 @@ static int bcm2835_power_probe(struct platform_device *pdev)
 
        power->pd_xlate.num_domains = ARRAY_SIZE(power_domain_names);
 
-       for (i = 0; i < ARRAY_SIZE(power_domain_names); i++)
-               bcm2835_init_power_domain(power, i, power_domain_names[i]);
+       for (i = 0; i < ARRAY_SIZE(power_domain_names); i++) {
+               ret = bcm2835_init_power_domain(power, i, power_domain_names[i]);
+               if (ret)
+                       goto fail;
+       }
 
        for (i = 0; i < ARRAY_SIZE(domain_deps); i++) {
                pm_genpd_add_subdomain(&power->domains[domain_deps[i].parent].base,
@@ -634,12 +660,21 @@ static int bcm2835_power_probe(struct platform_device *pdev)
 
        ret = devm_reset_controller_register(dev, &power->reset);
        if (ret)
-               return ret;
+               goto fail;
 
        of_genpd_add_provider_onecell(dev->parent->of_node, &power->pd_xlate);
 
        dev_info(dev, "Broadcom BCM2835 power domains driver");
        return 0;
+
+fail:
+       for (i = 0; i < ARRAY_SIZE(power_domain_names); i++) {
+               struct generic_pm_domain *dom = &power->domains[i].base;
+
+               if (dom->name)
+                       pm_genpd_remove(dom);
+       }
+       return ret;
 }
 
 static int bcm2835_power_remove(struct platform_device *pdev)
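
devm_clk_get() failing is fatal here only for -EPROBE_DEFER; any other error means the domain simply has no clock, so the error pointer is normalized to NULL, which the clk API accepts as a no-op clock. The !reg early returns in the ASB helpers above serve the same optional-resource purpose. A sketch of the pattern with stand-in error-pointer helpers and a hypothetical get_clock() (the real code uses devm_clk_get() and a real domain name):

#include <stdio.h>

#define EPROBE_DEFER	517
#define MAX_ERRNO	4095

/* minimal stand-ins for the kernel's error-pointer helpers */
static void *ERR_PTR(long err) { return (void *)err; }
static long PTR_ERR(const void *p) { return (long)p; }
static int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

struct clk;

/* hypothetical provider: some domains simply have no clock */
static struct clk *get_clock(const char *name)
{
	(void)name;
	return ERR_PTR(-2);	/* -ENOENT: no such clock */
}

static int init_domain(const char *name, struct clk **out)
{
	struct clk *clk = get_clock(name);

	if (IS_ERR(clk)) {
		if (PTR_ERR(clk) == -EPROBE_DEFER)
			return -EPROBE_DEFER;	/* retry the probe later */
		clk = NULL;	/* optional resource: absence is fine */
	}
	*out = clk;
	return 0;
}

int main(void)
{
	struct clk *clk;
	int ret = init_domain("some-domain", &clk);

	printf("ret=%d clk=%p\n", ret, (void *)clk);
	return 0;
}
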
index c0901b96cfe44850f6b6e580d95432b20e4a2d89..62951e836cbc879d1e4be6ba158a8230ebec2c52 100644 (file)
@@ -114,8 +114,6 @@ source "drivers/staging/ralink-gdma/Kconfig"
 
 source "drivers/staging/mt7621-mmc/Kconfig"
 
-source "drivers/staging/mt7621-eth/Kconfig"
-
 source "drivers/staging/mt7621-dts/Kconfig"
 
 source "drivers/staging/gasket/Kconfig"
index 57c6bce13ff4bff0c3487315835c2e1d6b80832e..d1b17ddcd354de10c68455bc64e802c101a59e7d 100644 (file)
@@ -47,7 +47,6 @@ obj-$(CONFIG_SPI_MT7621)      += mt7621-spi/
 obj-$(CONFIG_SOC_MT7621)       += mt7621-dma/
 obj-$(CONFIG_DMA_RALINK)       += ralink-gdma/
 obj-$(CONFIG_MTK_MMC)          += mt7621-mmc/
-obj-$(CONFIG_NET_MEDIATEK_SOC_STAGING) += mt7621-eth/
 obj-$(CONFIG_SOC_MT7621)       += mt7621-dts/
 obj-$(CONFIG_STAGING_GASKET_FRAMEWORK) += gasket/
 obj-$(CONFIG_XIL_AXIS_FIFO)    += axis-fifo/
index 687537203d9cfba144fba1dc846d3cc410776b08..d9725888af6fc34045806fbb5c91ca372b7c9a46 100644 (file)
@@ -3,6 +3,7 @@
 #
 config XIL_AXIS_FIFO
        tristate "Xilinx AXI-Stream FIFO IP core driver"
+       depends on OF
        default n
        help
          This adds support for the Xilinx AXI-Stream
index a7d569cfca5db6b613e31d54eecb48e08f96413e..0dff1ac057cdeb0185cc67357213707aadf5c0cf 100644 (file)
@@ -1001,6 +1001,8 @@ int comedi_dio_insn_config(struct comedi_device *dev,
                           unsigned int mask);
 unsigned int comedi_dio_update_state(struct comedi_subdevice *s,
                                     unsigned int *data);
+unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s,
+                                      struct comedi_cmd *cmd);
 unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s);
 unsigned int comedi_nscans_left(struct comedi_subdevice *s,
                                unsigned int nscans);
index eefa62f42c0f06d8b84e03379c0499d7b66d8ace..5a32b8fc000e3df08409028c9ffa5ff979d4efec 100644 (file)
@@ -394,11 +394,13 @@ unsigned int comedi_dio_update_state(struct comedi_subdevice *s,
 EXPORT_SYMBOL_GPL(comedi_dio_update_state);
 
 /**
- * comedi_bytes_per_scan() - Get length of asynchronous command "scan" in bytes
+ * comedi_bytes_per_scan_cmd() - Get length of asynchronous command "scan" in
+ * bytes
  * @s: COMEDI subdevice.
+ * @cmd: COMEDI command.
  *
  * Determines the overall scan length according to the subdevice type and the
- * number of channels in the scan.
+ * number of channels in the scan for the specified command.
  *
  * For digital input, output or input/output subdevices, samples for
  * multiple channels are assumed to be packed into one or more unsigned
@@ -408,9 +410,9 @@ EXPORT_SYMBOL_GPL(comedi_dio_update_state);
  *
  * Returns the overall scan length in bytes.
  */
-unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s)
+unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s,
+                                      struct comedi_cmd *cmd)
 {
-       struct comedi_cmd *cmd = &s->async->cmd;
        unsigned int num_samples;
        unsigned int bits_per_sample;
 
@@ -427,6 +429,29 @@ unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s)
        }
        return comedi_samples_to_bytes(s, num_samples);
 }
+EXPORT_SYMBOL_GPL(comedi_bytes_per_scan_cmd);
+
+/**
+ * comedi_bytes_per_scan() - Get length of asynchronous command "scan" in bytes
+ * @s: COMEDI subdevice.
+ *
+ * Determines the overall scan length according to the subdevice type and the
+ * number of channels in the scan for the current command.
+ *
+ * For digital input, output or input/output subdevices, samples for
+ * multiple channels are assumed to be packed into one or more unsigned
+ * short or unsigned int values according to the subdevice's %SDF_LSAMPL
+ * flag.  For other types of subdevice, samples are assumed to occupy a
+ * whole unsigned short or unsigned int according to the %SDF_LSAMPL flag.
+ *
+ * Returns the overall scan length in bytes.
+ */
+unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s)
+{
+       struct comedi_cmd *cmd = &s->async->cmd;
+
+       return comedi_bytes_per_scan_cmd(s, cmd);
+}
 EXPORT_SYMBOL_GPL(comedi_bytes_per_scan);
 
 static unsigned int __comedi_nscans_left(struct comedi_subdevice *s,
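
The comedi change is the classic parameterize-and-wrap refactor: comedi_bytes_per_scan_cmd() takes the command explicitly, and comedi_bytes_per_scan() becomes a thin wrapper passing the currently installed one, so the ni_cdio_cmdtest hunk below can size a command that has not been accepted yet. The shape of the refactor, with the sample-size logic boiled down to a multiply:

#include <stdio.h>

struct cmd { unsigned int chanlist_len; };
struct subdev { struct cmd active; };

/* new helper: works on any command, installed or not */
static unsigned int bytes_per_scan_cmd(const struct subdev *s,
				       const struct cmd *cmd)
{
	(void)s;
	return cmd->chanlist_len * (unsigned int)sizeof(unsigned short);
}

/* old entry point, now a thin wrapper over the active command */
static unsigned int bytes_per_scan(const struct subdev *s)
{
	return bytes_per_scan_cmd(s, &s->active);
}

int main(void)
{
	struct subdev s = { .active = { .chanlist_len = 4 } };
	struct cmd candidate = { .chanlist_len = 16 };	/* not installed */

	printf("active: %u, candidate: %u\n",
	       bytes_per_scan(&s), bytes_per_scan_cmd(&s, &candidate));
	return 0;
}
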
index 5edf59ac6706d3b5cd7d23d0f945895dc1cb8f48..b04dad8c70927a0aa52229393c063adce4b32e37 100644 (file)
@@ -3545,6 +3545,7 @@ static int ni_cdio_cmdtest(struct comedi_device *dev,
                           struct comedi_subdevice *s, struct comedi_cmd *cmd)
 {
        struct ni_private *devpriv = dev->private;
+       unsigned int bytes_per_scan;
        int err = 0;
 
        /* Step 1 : check if triggers are trivially valid */
@@ -3579,9 +3580,12 @@ static int ni_cdio_cmdtest(struct comedi_device *dev,
        err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
        err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
                                           cmd->chanlist_len);
-       err |= comedi_check_trigger_arg_max(&cmd->stop_arg,
-                                           s->async->prealloc_bufsz /
-                                           comedi_bytes_per_scan(s));
+       bytes_per_scan = comedi_bytes_per_scan_cmd(s, cmd);
+       if (bytes_per_scan) {
+               err |= comedi_check_trigger_arg_max(&cmd->stop_arg,
+                                                   s->async->prealloc_bufsz /
+                                                   bytes_per_scan);
+       }
 
        if (err)
                return 3;
index 829f7b12e0dcf4aa3ee34a5315f6d5f52b7087ec..9bbc68729c11052018c26335d6b88498e3491b32 100644 (file)
@@ -23,6 +23,21 @@ static const unsigned char erofs_filetype_table[EROFS_FT_MAX] = {
        [EROFS_FT_SYMLINK]      = DT_LNK,
 };
 
+static void debug_one_dentry(unsigned char d_type, const char *de_name,
+                            unsigned int de_namelen)
+{
+#ifdef CONFIG_EROFS_FS_DEBUG
+       /* the on-disk name has no trailing '\0', so NUL-terminate a copy */
+       unsigned char dbg_namebuf[EROFS_NAME_LEN + 1];
+
+       memcpy(dbg_namebuf, de_name, de_namelen);
+       dbg_namebuf[de_namelen] = '\0';
+
+       debugln("found dirent %s de_len %u d_type %d", dbg_namebuf,
+               de_namelen, d_type);
+#endif
+}
+
 static int erofs_fill_dentries(struct dir_context *ctx,
                               void *dentry_blk, unsigned int *ofs,
                               unsigned int nameoff, unsigned int maxsize)
@@ -33,14 +48,10 @@ static int erofs_fill_dentries(struct dir_context *ctx,
        de = dentry_blk + *ofs;
        while (de < end) {
                const char *de_name;
-               int de_namelen;
+               unsigned int de_namelen;
                unsigned char d_type;
-#ifdef CONFIG_EROFS_FS_DEBUG
-               unsigned int dbg_namelen;
-               unsigned char dbg_namebuf[EROFS_NAME_LEN];
-#endif
 
-               if (unlikely(de->file_type < EROFS_FT_MAX))
+               if (de->file_type < EROFS_FT_MAX)
                        d_type = erofs_filetype_table[de->file_type];
                else
                        d_type = DT_UNKNOWN;
@@ -48,26 +59,20 @@ static int erofs_fill_dentries(struct dir_context *ctx,
                nameoff = le16_to_cpu(de->nameoff);
                de_name = (char *)dentry_blk + nameoff;
 
-               de_namelen = unlikely(de + 1 >= end) ?
-                       /* last directory entry */
-                       strnlen(de_name, maxsize - nameoff) :
-                       le16_to_cpu(de[1].nameoff) - nameoff;
+               /* the last dirent in the block? */
+               if (de + 1 >= end)
+                       de_namelen = strnlen(de_name, maxsize - nameoff);
+               else
+                       de_namelen = le16_to_cpu(de[1].nameoff) - nameoff;
 
                /* a corrupted entry is found */
-               if (unlikely(de_namelen < 0)) {
+               if (unlikely(nameoff + de_namelen > maxsize ||
+                            de_namelen > EROFS_NAME_LEN)) {
                        DBG_BUGON(1);
                        return -EIO;
                }
 
-#ifdef CONFIG_EROFS_FS_DEBUG
-               dbg_namelen = min(EROFS_NAME_LEN - 1, de_namelen);
-               memcpy(dbg_namebuf, de_name, dbg_namelen);
-               dbg_namebuf[dbg_namelen] = '\0';
-
-               debugln("%s, found de_name %s de_len %d d_type %d", __func__,
-                       dbg_namebuf, de_namelen, d_type);
-#endif
-
+               debug_one_dentry(d_type, de_name, de_namelen);
                if (!dir_emit(ctx, de_name, de_namelen,
                              le64_to_cpu(de->nid), d_type))
                        /* stopped for some reason */
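
Two details in this erofs hunk are worth isolating: on-disk names carry no trailing '\0', so the debug path copies into a local buffer before printing, and the corruption check now bounds both the name offset and the name length before the bytes are touched. Both in one sketch, assuming a flat block of packed name bytes:

#include <stdio.h>
#include <string.h>

#define NAME_MAX_LEN 16

/* returns 0 on success, -1 if the dirent name is out of bounds */
static int emit_name(const char *blk, unsigned int nameoff,
		     unsigned int namelen, unsigned int maxsize)
{
	char buf[NAME_MAX_LEN + 1];

	/* reject a corrupted entry before touching the bytes */
	if (nameoff + namelen > maxsize || namelen > NAME_MAX_LEN)
		return -1;

	/* on-disk names are not '\0'-terminated: copy, then terminate */
	memcpy(buf, blk + nameoff, namelen);
	buf[namelen] = '\0';
	printf("found dirent %s de_len %u\n", buf, namelen);
	return 0;
}

int main(void)
{
	const char blk[] = "fileA" "fileBB";	/* names packed back to back */

	emit_name(blk, 0, 5, sizeof(blk));
	emit_name(blk, 5, 6, sizeof(blk));
	printf("corrupt: %d\n", emit_name(blk, 5, 99, sizeof(blk)));
	return 0;
}
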
index 8715bc50e09c16d44ece32baa474eb7d9bc5ab8f..31eef839577436709b1a5261507aff59bcf821d7 100644 (file)
@@ -972,6 +972,7 @@ static int z_erofs_vle_unzip(struct super_block *sb,
        overlapped = false;
        compressed_pages = grp->compressed_pages;
 
+       err = 0;
        for (i = 0; i < clusterpages; ++i) {
                unsigned int pagenr;
 
@@ -981,26 +982,39 @@ static int z_erofs_vle_unzip(struct super_block *sb,
                DBG_BUGON(!page);
                DBG_BUGON(!page->mapping);
 
-               if (z_erofs_is_stagingpage(page))
-                       continue;
+               if (!z_erofs_is_stagingpage(page)) {
 #ifdef EROFS_FS_HAS_MANAGED_CACHE
-               if (page->mapping == MNGD_MAPPING(sbi)) {
-                       DBG_BUGON(!PageUptodate(page));
-                       continue;
-               }
+                       if (page->mapping == MNGD_MAPPING(sbi)) {
+                               if (unlikely(!PageUptodate(page)))
+                                       err = -EIO;
+                               continue;
+                       }
 #endif
 
-               /* only non-head page could be reused as a compressed page */
-               pagenr = z_erofs_onlinepage_index(page);
+                       /*
+                        * only a non-head page can be selected
+                        * for in-place decompression
+                        */
+                       pagenr = z_erofs_onlinepage_index(page);
 
-               DBG_BUGON(pagenr >= nr_pages);
-               DBG_BUGON(pages[pagenr]);
-               ++sparsemem_pages;
-               pages[pagenr] = page;
+                       DBG_BUGON(pagenr >= nr_pages);
+                       DBG_BUGON(pages[pagenr]);
+                       ++sparsemem_pages;
+                       pages[pagenr] = page;
 
-               overlapped = true;
+                       overlapped = true;
+               }
+
+               /* PG_error needs checking for in-place and staging pages */
+               if (unlikely(PageError(page))) {
+                       DBG_BUGON(PageUptodate(page));
+                       err = -EIO;
+               }
        }
 
+       if (unlikely(err))
+               goto out;
+
        llen = (nr_pages << PAGE_SHIFT) - work->pageofs;
 
        if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) {
@@ -1029,6 +1043,10 @@ static int z_erofs_vle_unzip(struct super_block *sb,
 
 skip_allocpage:
        vout = erofs_vmap(pages, nr_pages);
+       if (!vout) {
+               err = -ENOMEM;
+               goto out;
+       }
 
        err = z_erofs_vle_unzip_vmap(compressed_pages,
                clusterpages, vout, llen, work->pageofs, overlapped);
@@ -1194,6 +1212,7 @@ pickup_page_for_submission(struct z_erofs_vle_workgroup *grp,
        if (page->mapping == mc) {
                WRITE_ONCE(grp->compressed_pages[nr], page);
 
+               ClearPageError(page);
                if (!PagePrivate(page)) {
                        /*
                         * impossible to be !PagePrivate(page) for
index 48b263a2731aad2edd28f19f9df3fcc8e461bc1c..0daac9b984a8ec82207ca4e53da9a201a4204707 100644 (file)
@@ -136,10 +136,13 @@ int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
 
        nr_pages = DIV_ROUND_UP(outlen + pageofs, PAGE_SIZE);
 
-       if (clusterpages == 1)
+       if (clusterpages == 1) {
                vin = kmap_atomic(compressed_pages[0]);
-       else
+       } else {
                vin = erofs_vmap(compressed_pages, clusterpages);
+               if (!vin)
+                       return -ENOMEM;
+       }
 
        preempt_disable();
        vout = erofs_pcpubuf[smp_processor_id()].data;
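
Both erofs hunks above stop assuming the virtual mapping always succeeds: erofs_vmap() may return NULL, so the callers now fail with -ENOMEM, and the single-page case keeps the cheaper kmap_atomic() path. The map-or-fail shape in user space, with malloc standing in for vmap:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

/* stand-in for erofs_vmap(): may fail, and the caller must check */
static void *vmap_pages(char **pages, unsigned int nr)
{
	char *v = malloc((size_t)nr * PAGE_SIZE);
	unsigned int i;

	if (!v)
		return NULL;
	for (i = 0; i < nr; i++)
		memcpy(v + (size_t)i * PAGE_SIZE, pages[i], PAGE_SIZE);
	return v;
}

static int unzip(char **pages, unsigned int nr)
{
	void *vin;

	if (nr == 1) {
		vin = pages[0];		/* single page: no mapping needed */
	} else {
		vin = vmap_pages(pages, nr);
		if (!vin)
			return -12;	/* -ENOMEM instead of a NULL deref */
	}
	/* ... decompress from vin ... */
	if (nr != 1)
		free(vin);
	return 0;
}

int main(void)
{
	char a[PAGE_SIZE] = "A", b[PAGE_SIZE] = "B";
	char *pages[] = { a, b };

	printf("unzip: %d\n", unzip(pages, 2));
	return 0;
}
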
index b733855402168efffa6a8a192a3d21e5d3e0ff60..250c15ace2a71147be4e6f14e522b6a3e898745e 100644 (file)
@@ -117,22 +117,6 @@ &pcie {
        status = "okay";
 };
 
-&ethernet {
-       //mtd-mac-address = <&factory 0xe000>;
-       gmac1: mac@0 {
-               compatible = "mediatek,eth-mac";
-               reg = <0>;
-               phy-handle = <&phy1>;
-       };
-
-       mdio-bus {
-               phy1: ethernet-phy@1 {
-                       reg = <1>;
-                       phy-mode = "rgmii";
-               };
-       };
-};
-
 &pinctrl {
        state_default: pinctrl0 {
                gpio {
@@ -141,3 +125,16 @@ gpio {
                };
        };
 };
+
+&switch0 {
+       ports {
+               port@0 {
+                       label = "ethblack";
+                       status = "ok";
+               };
+               port@4 {
+                       label = "ethblue";
+                       status = "ok";
+               };
+       };
+};
index 6aff3680ce4b6b4574247ffc564b2fe922e5a544..17020e24abd294055b321985c6806904eb1e8e81 100644 (file)
@@ -372,16 +372,83 @@ ethernet: ethernet@1e100000 {
 
                mediatek,ethsys = <&ethsys>;
 
-               mediatek,switch = <&gsw>;
 
+               gmac0: mac@0 {
+                       compatible = "mediatek,eth-mac";
+                       reg = <0>;
+                       phy-mode = "rgmii";
+                       fixed-link {
+                               speed = <1000>;
+                               full-duplex;
+                               pause;
+                       };
+               };
+               gmac1: mac@1 {
+                       compatible = "mediatek,eth-mac";
+                       reg = <1>;
+                       status = "off";
+                       phy-mode = "rgmii";
+                       phy-handle = <&phy5>;
+               };
                mdio-bus {
                        #address-cells = <1>;
                        #size-cells = <0>;
 
-                       phy1f: ethernet-phy@1f {
-                               reg = <0x1f>;
+                       phy5: ethernet-phy@5 {
+                               reg = <5>;
                                phy-mode = "rgmii";
                        };
+
+                       switch0: switch0@0 {
+                               compatible = "mediatek,mt7621";
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+                               reg = <0>;
+                               mediatek,mcm;
+                               resets = <&rstctrl 2>;
+                               reset-names = "mcm";
+
+                               ports {
+                                       #address-cells = <1>;
+                                       #size-cells = <0>;
+                                       reg = <0>;
+                                       port@0 {
+                                               status = "off";
+                                               reg = <0>;
+                                               label = "lan0";
+                                       };
+                                       port@1 {
+                                               status = "off";
+                                               reg = <1>;
+                                               label = "lan1";
+                                       };
+                                       port@2 {
+                                               status = "off";
+                                               reg = <2>;
+                                               label = "lan2";
+                                       };
+                                       port@3 {
+                                               status = "off";
+                                               reg = <3>;
+                                               label = "lan3";
+                                       };
+                                       port@4 {
+                                               status = "off";
+                                               reg = <4>;
+                                               label = "lan4";
+                                       };
+                                       port@6 {
+                                               reg = <6>;
+                                               label = "cpu";
+                                               ethernet = <&gmac0>;
+                                               phy-mode = "trgmii";
+                                               fixed-link {
+                                                       speed = <1000>;
+                                                       full-duplex;
+                                               };
+                                       };
+                               };
+                       };
                };
        };
 
diff --git a/drivers/staging/mt7621-eth/Documentation/devicetree/bindings/net/mediatek-net-gsw.txt b/drivers/staging/mt7621-eth/Documentation/devicetree/bindings/net/mediatek-net-gsw.txt
deleted file mode 100644 (file)
index 596b385..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-Mediatek Gigabit Switch
-=======================
-
-The mediatek gigabit switch can be found on Mediatek SoCs.
-
-Required properties:
-- compatible: Should be "mediatek,mt7620-gsw", "mediatek,mt7621-gsw",
-  "mediatek,mt7623-gsw"
-- reg: Address and length of the register set for the device
-- interrupts: Should contain the gigabit switches interrupt
-
-
-Additional required properties for ARM based SoCs:
-- mediatek,reset-pin: phandle describing the reset GPIO
-- clocks: the clocks used by the switch
-- clock-names: the names of the clocks listed in the clocks property
-  these should be "trgpll", "esw", "gp2", "gp1"
-- mt7530-supply: the phandle of the regulator used to power the switch
-- mediatek,pctl-regmap: phandle to the port control regmap. this is used to
-  setup the drive current
-
-
-Optional properties:
-- interrupt-parent: Should be the phandle for the interrupt controller
-  that services interrupts for this device
-
-Example:
-
-gsw: switch@1b100000 {
-       compatible = "mediatek,mt7623-gsw";
-       reg = <0 0x1b110000 0 0x300000>;
-
-       interrupt-parent = <&pio>;
-       interrupts = <168 IRQ_TYPE_EDGE_RISING>;
-
-       clocks = <&apmixedsys CLK_APMIXED_TRGPLL>,
-                <&ethsys CLK_ETHSYS_ESW>,
-                <&ethsys CLK_ETHSYS_GP2>,
-                <&ethsys CLK_ETHSYS_GP1>;
-       clock-names = "trgpll", "esw", "gp2", "gp1";
-
-       mt7530-supply = <&mt6323_vpa_reg>;
-
-       mediatek,pctl-regmap = <&syscfg_pctl_a>;
-       mediatek,reset-pin = <&pio 15 0>;
-
-       status = "okay";
-};
diff --git a/drivers/staging/mt7621-eth/Kconfig b/drivers/staging/mt7621-eth/Kconfig
deleted file mode 100644 (file)
index 44ea86c..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-config NET_VENDOR_MEDIATEK_STAGING
-       bool "MediaTek ethernet driver - staging version"
-       depends on RALINK
-       ---help---
-         If you have an MT7621 Mediatek SoC with ethernet, say Y.
-
-if NET_VENDOR_MEDIATEK_STAGING
-choice
-       prompt "MAC type"
-
-config NET_MEDIATEK_MT7621
-       bool "MT7621"
-       depends on MIPS && SOC_MT7621
-
-endchoice
-
-config NET_MEDIATEK_SOC_STAGING
-       tristate "MediaTek SoC Gigabit Ethernet support"
-       depends on NET_VENDOR_MEDIATEK_STAGING
-       select PHYLIB
-       ---help---
-         This driver supports the gigabit ethernet MACs in the
-         MediaTek SoC family.
-
-config NET_MEDIATEK_MDIO
-       def_bool NET_MEDIATEK_SOC_STAGING
-       depends on NET_MEDIATEK_MT7621
-       select PHYLIB
-
-config NET_MEDIATEK_MDIO_MT7620
-       def_bool NET_MEDIATEK_SOC_STAGING
-       depends on NET_MEDIATEK_MT7621
-       select NET_MEDIATEK_MDIO
-
-config NET_MEDIATEK_GSW_MT7621
-       def_tristate NET_MEDIATEK_SOC_STAGING
-       depends on NET_MEDIATEK_MT7621
-
-endif #NET_VENDOR_MEDIATEK_STAGING
diff --git a/drivers/staging/mt7621-eth/Makefile b/drivers/staging/mt7621-eth/Makefile
deleted file mode 100644 (file)
index 018bcc3..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-#
-# Makefile for the Ralink SoCs built-in ethernet macs
-#
-
-mtk-eth-soc-y                                  += mtk_eth_soc.o ethtool.o
-
-mtk-eth-soc-$(CONFIG_NET_MEDIATEK_MDIO)                += mdio.o
-mtk-eth-soc-$(CONFIG_NET_MEDIATEK_MDIO_MT7620) += mdio_mt7620.o
-
-mtk-eth-soc-$(CONFIG_NET_MEDIATEK_MT7621)      += soc_mt7621.o
-
-obj-$(CONFIG_NET_MEDIATEK_GSW_MT7621)          += gsw_mt7621.o
-
-obj-$(CONFIG_NET_MEDIATEK_SOC_STAGING)         += mtk-eth-soc.o
diff --git a/drivers/staging/mt7621-eth/TODO b/drivers/staging/mt7621-eth/TODO
deleted file mode 100644 (file)
index f9e47d4..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-
-- verify devicetree documentation is consistent with code
-- fix ethtool - currently doesn't return valid data.
-- general code review and clean up
-- add support for second MAC on mt7621
-- convert gsw code to use switchdev interfaces
-- md7620_mmi_write etc should probably be wrapped
-  in a regmap abstraction.
-- Get soc_mt7621 to work with QDMA TX if possible.
-- Ensure phys are correctly configured when a cable
-  is plugged in.
-
-Cc: NeilBrown <neil@brown.name>
diff --git a/drivers/staging/mt7621-eth/ethtool.c b/drivers/staging/mt7621-eth/ethtool.c
deleted file mode 100644 (file)
index 8c4228e..0000000
+++ /dev/null
@@ -1,250 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*   This program is free software; you can redistribute it and/or modify
- *   it under the terms of the GNU General Public License as published by
- *   the Free Software Foundation; version 2 of the License
- *
- *   This program is distributed in the hope that it will be useful,
- *   but WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *   GNU General Public License for more details.
- *
- *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#include "mtk_eth_soc.h"
-#include "ethtool.h"
-
-struct mtk_stat {
-       char name[ETH_GSTRING_LEN];
-       unsigned int idx;
-};
-
-#define MTK_HW_STAT(stat) { \
-       .name = #stat, \
-       .idx = offsetof(struct mtk_hw_stats, stat) / sizeof(u64) \
-}
-
-static const struct mtk_stat mtk_ethtool_hw_stats[] = {
-       MTK_HW_STAT(tx_bytes),
-       MTK_HW_STAT(tx_packets),
-       MTK_HW_STAT(tx_skip),
-       MTK_HW_STAT(tx_collisions),
-       MTK_HW_STAT(rx_bytes),
-       MTK_HW_STAT(rx_packets),
-       MTK_HW_STAT(rx_overflow),
-       MTK_HW_STAT(rx_fcs_errors),
-       MTK_HW_STAT(rx_short_errors),
-       MTK_HW_STAT(rx_long_errors),
-       MTK_HW_STAT(rx_checksum_errors),
-       MTK_HW_STAT(rx_flow_control_packets),
-};
-
-#define MTK_HW_STATS_LEN       ARRAY_SIZE(mtk_ethtool_hw_stats)
-
-static int mtk_get_link_ksettings(struct net_device *dev,
-                                 struct ethtool_link_ksettings *cmd)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       int err;
-
-       if (!mac->phy_dev)
-               return -ENODEV;
-
-       if (mac->phy_flags == MTK_PHY_FLAG_ATTACH) {
-               err = phy_read_status(mac->phy_dev);
-               if (err)
-                       return -ENODEV;
-       }
-
-       phy_ethtool_ksettings_get(mac->phy_dev, cmd);
-       return 0;
-}
-
-static int mtk_set_link_ksettings(struct net_device *dev,
-                                 const struct ethtool_link_ksettings *cmd)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-
-       if (!mac->phy_dev)
-               return -ENODEV;
-
-       if (cmd->base.phy_address != mac->phy_dev->mdio.addr) {
-               if (mac->hw->phy->phy_node[cmd->base.phy_address]) {
-                       mac->phy_dev = mac->hw->phy->phy[cmd->base.phy_address];
-                       mac->phy_flags = MTK_PHY_FLAG_PORT;
-               } else if (mac->hw->mii_bus) {
-                       mac->phy_dev = mdiobus_get_phy(mac->hw->mii_bus,
-                                                      cmd->base.phy_address);
-                       if (!mac->phy_dev)
-                               return -ENODEV;
-                       mac->phy_flags = MTK_PHY_FLAG_ATTACH;
-               } else {
-                       return -ENODEV;
-               }
-       }
-
-       return phy_ethtool_ksettings_set(mac->phy_dev, cmd);
-}
-
-static void mtk_get_drvinfo(struct net_device *dev,
-                           struct ethtool_drvinfo *info)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       struct mtk_soc_data *soc = mac->hw->soc;
-
-       strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
-       strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
-
-       if (soc->reg_table[MTK_REG_MTK_COUNTER_BASE])
-               info->n_stats = MTK_HW_STATS_LEN;
-}
-
-static u32 mtk_get_msglevel(struct net_device *dev)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-
-       return mac->hw->msg_enable;
-}
-
-static void mtk_set_msglevel(struct net_device *dev, u32 value)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-
-       mac->hw->msg_enable = value;
-}
-
-static int mtk_nway_reset(struct net_device *dev)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-
-       if (!mac->phy_dev)
-               return -EOPNOTSUPP;
-
-       return genphy_restart_aneg(mac->phy_dev);
-}
-
-static u32 mtk_get_link(struct net_device *dev)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       int err;
-
-       if (!mac->phy_dev)
-               goto out_get_link;
-
-       if (mac->phy_flags == MTK_PHY_FLAG_ATTACH) {
-               err = genphy_update_link(mac->phy_dev);
-               if (err)
-                       goto out_get_link;
-       }
-
-       return mac->phy_dev->link;
-
-out_get_link:
-       return ethtool_op_get_link(dev);
-}
-
-static int mtk_set_ringparam(struct net_device *dev,
-                            struct ethtool_ringparam *ring)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-
-       if ((ring->tx_pending < 2) ||
-           (ring->rx_pending < 2) ||
-           (ring->rx_pending > mac->hw->soc->dma_ring_size) ||
-           (ring->tx_pending > mac->hw->soc->dma_ring_size))
-               return -EINVAL;
-
-       dev->netdev_ops->ndo_stop(dev);
-
-       mac->hw->tx_ring.tx_ring_size = BIT(fls(ring->tx_pending) - 1);
-       mac->hw->rx_ring[0].rx_ring_size = BIT(fls(ring->rx_pending) - 1);
-
-       return dev->netdev_ops->ndo_open(dev);
-}
-
-static void mtk_get_ringparam(struct net_device *dev,
-                             struct ethtool_ringparam *ring)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-
-       ring->rx_max_pending = mac->hw->soc->dma_ring_size;
-       ring->tx_max_pending = mac->hw->soc->dma_ring_size;
-       ring->rx_pending = mac->hw->rx_ring[0].rx_ring_size;
-       ring->tx_pending = mac->hw->tx_ring.tx_ring_size;
-}
-
-static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
-{
-       int i;
-
-       switch (stringset) {
-       case ETH_SS_STATS:
-               for (i = 0; i < MTK_HW_STATS_LEN; i++) {
-                       memcpy(data, mtk_ethtool_hw_stats[i].name,
-                              ETH_GSTRING_LEN);
-                       data += ETH_GSTRING_LEN;
-               }
-               break;
-       }
-}
-
-static int mtk_get_sset_count(struct net_device *dev, int sset)
-{
-       switch (sset) {
-       case ETH_SS_STATS:
-               return MTK_HW_STATS_LEN;
-       default:
-               return -EOPNOTSUPP;
-       }
-}
-
-static void mtk_get_ethtool_stats(struct net_device *dev,
-                                 struct ethtool_stats *stats, u64 *data)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       struct mtk_hw_stats *hwstats = mac->hw_stats;
-       unsigned int start;
-       int i;
-
-       if (netif_running(dev) && netif_device_present(dev)) {
-               if (spin_trylock(&hwstats->stats_lock)) {
-                       mtk_stats_update_mac(mac);
-                       spin_unlock(&hwstats->stats_lock);
-               }
-       }
-
-       do {
-               start = u64_stats_fetch_begin_irq(&hwstats->syncp);
-               for (i = 0; i < MTK_HW_STATS_LEN; i++)
-                       data[i] = ((u64 *)hwstats)[mtk_ethtool_hw_stats[i].idx];
-
-       } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
-}
-
-static struct ethtool_ops mtk_ethtool_ops = {
-       .get_link_ksettings     = mtk_get_link_ksettings,
-       .set_link_ksettings     = mtk_set_link_ksettings,
-       .get_drvinfo            = mtk_get_drvinfo,
-       .get_msglevel           = mtk_get_msglevel,
-       .set_msglevel           = mtk_set_msglevel,
-       .nway_reset             = mtk_nway_reset,
-       .get_link               = mtk_get_link,
-       .set_ringparam          = mtk_set_ringparam,
-       .get_ringparam          = mtk_get_ringparam,
-};
-
-void mtk_set_ethtool_ops(struct net_device *netdev)
-{
-       struct mtk_mac *mac = netdev_priv(netdev);
-       struct mtk_soc_data *soc = mac->hw->soc;
-
-       if (soc->reg_table[MTK_REG_MTK_COUNTER_BASE]) {
-               mtk_ethtool_ops.get_strings = mtk_get_strings;
-               mtk_ethtool_ops.get_sset_count = mtk_get_sset_count;
-               mtk_ethtool_ops.get_ethtool_stats = mtk_get_ethtool_stats;
-       }
-
-       netdev->ethtool_ops = &mtk_ethtool_ops;
-}
diff --git a/drivers/staging/mt7621-eth/ethtool.h b/drivers/staging/mt7621-eth/ethtool.h
deleted file mode 100644 (file)
index 0071469..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#ifndef MTK_ETHTOOL_H
-#define MTK_ETHTOOL_H
-
-#include <linux/ethtool.h>
-
-void mtk_set_ethtool_ops(struct net_device *netdev);
-
-#endif /* MTK_ETHTOOL_H */
diff --git a/drivers/staging/mt7621-eth/gsw_mt7620.h b/drivers/staging/mt7621-eth/gsw_mt7620.h
deleted file mode 100644 (file)
index 70f7e54..0000000
+++ /dev/null
@@ -1,277 +0,0 @@
-/*   This program is free software; you can redistribute it and/or modify
- *   it under the terms of the GNU General Public License as published by
- *   the Free Software Foundation; version 2 of the License
- *
- *   This program is distributed in the hope that it will be useful,
- *   but WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *   GNU General Public License for more details.
- *
- *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#ifndef _RALINK_GSW_MT7620_H__
-#define _RALINK_GSW_MT7620_H__
-
-#define GSW_REG_PHY_TIMEOUT    (5 * HZ)
-
-#define MT7620_GSW_REG_PIAC    0x0004
-
-#define GSW_NUM_VLANS          16
-#define GSW_NUM_VIDS           4096
-#define GSW_NUM_PORTS          7
-#define GSW_PORT6              6
-
-#define GSW_MDIO_ACCESS                BIT(31)
-#define GSW_MDIO_READ          BIT(19)
-#define GSW_MDIO_WRITE         BIT(18)
-#define GSW_MDIO_START         BIT(16)
-#define GSW_MDIO_ADDR_SHIFT    20
-#define GSW_MDIO_REG_SHIFT     25
-
-#define GSW_REG_PORT_PMCR(x)   (0x3000 + (x * 0x100))
-#define GSW_REG_PORT_STATUS(x) (0x3008 + (x * 0x100))
-#define GSW_REG_SMACCR0                0x3fE4
-#define GSW_REG_SMACCR1                0x3fE8
-#define GSW_REG_CKGCR          0x3ff0
-
-#define GSW_REG_IMR            0x7008
-#define GSW_REG_ISR            0x700c
-#define GSW_REG_GPC1           0x7014
-
-#define SYSC_REG_CHIP_REV_ID   0x0c
-#define SYSC_REG_CFG           0x10
-#define SYSC_REG_CFG1          0x14
-#define RST_CTRL_MCM           BIT(2)
-#define SYSC_PAD_RGMII2_MDIO   0x58
-#define SYSC_GPIO_MODE         0x60
-
-#define PORT_IRQ_ST_CHG                0x7f
-
-#define MT7621_ESW_PHY_POLLING 0x0000
-#define MT7620_ESW_PHY_POLLING 0x7000
-
-#define        PMCR_IPG                BIT(18)
-#define        PMCR_MAC_MODE           BIT(16)
-#define        PMCR_FORCE              BIT(15)
-#define        PMCR_TX_EN              BIT(14)
-#define        PMCR_RX_EN              BIT(13)
-#define        PMCR_BACKOFF            BIT(9)
-#define        PMCR_BACKPRES           BIT(8)
-#define        PMCR_RX_FC              BIT(5)
-#define        PMCR_TX_FC              BIT(4)
-#define        PMCR_SPEED(_x)          (_x << 2)
-#define        PMCR_DUPLEX             BIT(1)
-#define        PMCR_LINK               BIT(0)
-
-#define PHY_AN_EN              BIT(31)
-#define PHY_PRE_EN             BIT(30)
-#define PMY_MDC_CONF(_x)       ((_x & 0x3f) << 24)
-
-/* ethernet subsystem config register */
-#define ETHSYS_SYSCFG0         0x14
-/* ethernet subsystem clock register */
-#define ETHSYS_CLKCFG0         0x2c
-#define ETHSYS_TRGMII_CLK_SEL362_5     BIT(11)
-
-/* p5 RGMII wrapper TX clock control register */
-#define MT7530_P5RGMIITXCR     0x7b04
-/* p5 RGMII wrapper RX clock control register */
-#define MT7530_P5RGMIIRXCR     0x7b00
-/* TRGMII TDX ODT registers */
-#define MT7530_TRGMII_TD0_ODT  0x7a54
-#define MT7530_TRGMII_TD1_ODT  0x7a5c
-#define MT7530_TRGMII_TD2_ODT  0x7a64
-#define MT7530_TRGMII_TD3_ODT  0x7a6c
-#define MT7530_TRGMII_TD4_ODT  0x7a74
-#define MT7530_TRGMII_TD5_ODT  0x7a7c
-/* TRGMII TCK ctrl register */
-#define MT7530_TRGMII_TCK_CTRL 0x7a78
-/* TRGMII Tx ctrl register */
-#define MT7530_TRGMII_TXCTRL   0x7a40
-/* port 6 extended control register */
-#define MT7530_P6ECR            0x7830
-/* IO driver control register */
-#define MT7530_IO_DRV_CR       0x7810
-/* top signal control register */
-#define MT7530_TOP_SIG_CTRL    0x7808
-/* modified hwtrap register */
-#define MT7530_MHWTRAP         0x7804
-/* hwtrap status register */
-#define MT7530_HWTRAP          0x7800
-/* status interrupt register */
-#define MT7530_SYS_INT_STS     0x700c
-/* system interrupt register */
-#define MT7530_SYS_INT_EN      0x7008
-/* system control register */
-#define MT7530_SYS_CTRL                0x7000
-/* port MAC status register */
-#define MT7530_PMSR_P(x)       (0x3008 + (x * 0x100))
-/* port MAC control register */
-#define MT7530_PMCR_P(x)       (0x3000 + (x * 0x100))
-
-#define MT7621_XTAL_SHIFT      6
-#define MT7621_XTAL_MASK       0x7
-#define MT7621_XTAL_25         6
-#define MT7621_XTAL_40         3
-#define MT7621_MDIO_DRV_MASK   (3 << 4)
-#define MT7621_GE1_MODE_MASK   (3 << 12)
-
-#define TRGMII_TXCTRL_TXC_INV  BIT(30)
-#define P6ECR_INTF_MODE_RGMII  BIT(1)
-#define P5RGMIIRXCR_C_ALIGN    BIT(8)
-#define P5RGMIIRXCR_DELAY_2    BIT(1)
-#define P5RGMIITXCR_DELAY_2    (BIT(8) | BIT(2))
-
-/* TOP_SIG_CTRL bits */
-#define TOP_SIG_CTRL_NORMAL    (BIT(17) | BIT(16))
-
-/* MHWTRAP bits */
-#define MHWTRAP_MANUAL         BIT(16)
-#define MHWTRAP_P5_MAC_SEL     BIT(13)
-#define MHWTRAP_P6_DIS         BIT(8)
-#define MHWTRAP_P5_RGMII_MODE  BIT(7)
-#define MHWTRAP_P5_DIS         BIT(6)
-#define MHWTRAP_PHY_ACCESS     BIT(5)
-
-/* HWTRAP bits */
-#define HWTRAP_XTAL_SHIFT      9
-#define HWTRAP_XTAL_MASK       0x3
-
-/* SYS_CTRL bits */
-#define SYS_CTRL_SW_RST                BIT(1)
-#define SYS_CTRL_REG_RST       BIT(0)
-
-/* PMCR bits */
-#define PMCR_IFG_XMIT_96       BIT(18)
-#define PMCR_MAC_MODE          BIT(16)
-#define PMCR_FORCE_MODE                BIT(15)
-#define PMCR_TX_EN             BIT(14)
-#define PMCR_RX_EN             BIT(13)
-#define PMCR_BACK_PRES_EN      BIT(9)
-#define PMCR_BACKOFF_EN                BIT(8)
-#define PMCR_TX_FC_EN          BIT(5)
-#define PMCR_RX_FC_EN          BIT(4)
-#define PMCR_FORCE_SPEED_1000  BIT(3)
-#define PMCR_FORCE_FDX         BIT(1)
-#define PMCR_FORCE_LNK         BIT(0)
-#define PMCR_FIXED_LINK                (PMCR_IFG_XMIT_96 | PMCR_MAC_MODE | \
-                                PMCR_FORCE_MODE | PMCR_TX_EN | PMCR_RX_EN | \
-                                PMCR_BACK_PRES_EN | PMCR_BACKOFF_EN | \
-                                PMCR_FORCE_SPEED_1000 | PMCR_FORCE_FDX | \
-                                PMCR_FORCE_LNK)
-
-#define PMCR_FIXED_LINK_FC     (PMCR_FIXED_LINK | \
-                                PMCR_TX_FC_EN | PMCR_RX_FC_EN)
-
-/* TRGMII control registers */
-#define GSW_INTF_MODE          0x390
-#define GSW_TRGMII_TD0_ODT     0x354
-#define GSW_TRGMII_TD1_ODT     0x35c
-#define GSW_TRGMII_TD2_ODT     0x364
-#define GSW_TRGMII_TD3_ODT     0x36c
-#define GSW_TRGMII_TXCTL_ODT   0x374
-#define GSW_TRGMII_TCK_ODT     0x37c
-#define GSW_TRGMII_RCK_CTRL    0x300
-
-#define INTF_MODE_TRGMII       BIT(1)
-#define TRGMII_RCK_CTRL_RX_RST BIT(31)
-
-/* Mac control registers */
-#define MTK_MAC_P2_MCR         0x200
-#define MTK_MAC_P1_MCR         0x100
-
-#define MAC_MCR_MAX_RX_2K      BIT(29)
-#define MAC_MCR_IPG_CFG                (BIT(18) | BIT(16))
-#define MAC_MCR_FORCE_MODE     BIT(15)
-#define MAC_MCR_TX_EN          BIT(14)
-#define MAC_MCR_RX_EN          BIT(13)
-#define MAC_MCR_BACKOFF_EN     BIT(9)
-#define MAC_MCR_BACKPR_EN      BIT(8)
-#define MAC_MCR_FORCE_RX_FC    BIT(5)
-#define MAC_MCR_FORCE_TX_FC    BIT(4)
-#define MAC_MCR_SPEED_1000     BIT(3)
-#define MAC_MCR_FORCE_DPX      BIT(1)
-#define MAC_MCR_FORCE_LINK     BIT(0)
-#define MAC_MCR_FIXED_LINK     (MAC_MCR_MAX_RX_2K | MAC_MCR_IPG_CFG | \
-                                MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN | \
-                                MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN | \
-                                MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_RX_FC | \
-                                MAC_MCR_FORCE_TX_FC | MAC_MCR_SPEED_1000 | \
-                                MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_LINK)
-#define MAC_MCR_FIXED_LINK_FC  (MAC_MCR_MAX_RX_2K | MAC_MCR_IPG_CFG | \
-                                MAC_MCR_FIXED_LINK)
-
-/* possible XTAL speed */
-#define        MT7623_XTAL_40          0
-#define MT7623_XTAL_20         1
-#define MT7623_XTAL_25         3
-
-/* GPIO port control registers */
-#define        GPIO_OD33_CTRL8         0x4c0
-#define        GPIO_BIAS_CTRL          0xed0
-#define GPIO_DRV_SEL10         0xf00
-
-/* on MT7620 the function of port 4 can be software configured */
-enum {
-       PORT4_EPHY = 0,
-       PORT4_EXT,
-};
-
-/* struct mt7620_gsw - the structure that holds the SoC specific data
- * @dev:               The Device struct
- * @base:              The base address
- * @piac_offset:       The PIAC base may change depending on SoC
- * @irq:               The IRQ we are using
- * @port4:             The port4 mode on MT7620
- * @autopoll:          Is MDIO autopolling enabled
- * @ethsys:            The ethsys register map
- * @pctl:              The pin control register map
- * @clk_gsw:           The switch clock
- * @clk_gp1:           The gmac1 clock
- * @clk_gp2:           The gmac2 clock
- * @clk_trgpll:                The trgmii pll clock
- */
-struct mt7620_gsw {
-       struct device           *dev;
-       void __iomem            *base;
-       u32                     piac_offset;
-       int                     irq;
-       int                     port4;
-       unsigned long int       autopoll;
-
-       struct regmap           *ethsys;
-       struct regmap           *pctl;
-
-       struct clk              *clk_gsw;
-       struct clk              *clk_gp1;
-       struct clk              *clk_gp2;
-       struct clk              *clk_trgpll;
-};
-
-/* switch register I/O wrappers */
-void mtk_switch_w32(struct mt7620_gsw *gsw, u32 val, unsigned int reg);
-u32 mtk_switch_r32(struct mt7620_gsw *gsw, unsigned int reg);
-
-/* the callback used by the driver core to bring up the switch */
-int mtk_gsw_init(struct mtk_eth *eth);
-
-/* MDIO access wrappers */
-int mt7620_mdio_write(struct mii_bus *bus, int phy_addr, int phy_reg, u16 val);
-int mt7620_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg);
-void mt7620_mdio_link_adjust(struct mtk_eth *eth, int port);
-int mt7620_has_carrier(struct mtk_eth *eth);
-void mt7620_print_link_state(struct mtk_eth *eth, int port, int link,
-                            int speed, int duplex);
-void mt7530_mdio_w32(struct mt7620_gsw *gsw, u32 reg, u32 val);
-u32 mt7530_mdio_r32(struct mt7620_gsw *gsw, u32 reg);
-void mt7530_mdio_m32(struct mt7620_gsw *gsw, u32 mask, u32 set, u32 reg);
-
-u32 _mt7620_mii_write(struct mt7620_gsw *gsw, u32 phy_addr,
-                     u32 phy_register, u32 write_data);
-u32 _mt7620_mii_read(struct mt7620_gsw *gsw, int phy_addr, int phy_reg);
-void mt7620_handle_carrier(struct mtk_eth *eth);
-
-#endif
diff --git a/drivers/staging/mt7621-eth/gsw_mt7621.c b/drivers/staging/mt7621-eth/gsw_mt7621.c
deleted file mode 100644 (file)
index 53767b1..0000000
+++ /dev/null
@@ -1,297 +0,0 @@
-/*   This program is free software; you can redistribute it and/or modify
- *   it under the terms of the GNU General Public License as published by
- *   the Free Software Foundation; version 2 of the License
- *
- *   This program is distributed in the hope that it will be useful,
- *   but WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *   GNU General Public License for more details.
- *
- *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/platform_device.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
-
-#include <ralink_regs.h>
-
-#include "mtk_eth_soc.h"
-#include "gsw_mt7620.h"
-
-void mtk_switch_w32(struct mt7620_gsw *gsw, u32 val, unsigned int reg)
-{
-       iowrite32(val, gsw->base + reg);
-}
-EXPORT_SYMBOL_GPL(mtk_switch_w32);
-
-u32 mtk_switch_r32(struct mt7620_gsw *gsw, unsigned int reg)
-{
-       return ioread32(gsw->base + reg);
-}
-EXPORT_SYMBOL_GPL(mtk_switch_r32);
-
-static irqreturn_t gsw_interrupt_mt7621(int irq, void *_eth)
-{
-       struct mtk_eth *eth = (struct mtk_eth *)_eth;
-       struct mt7620_gsw *gsw = (struct mt7620_gsw *)eth->sw_priv;
-       u32 reg, i;
-
-       reg = mt7530_mdio_r32(gsw, MT7530_SYS_INT_STS);
-
-       for (i = 0; i < 5; i++) {
-               unsigned int link;
-
-               if ((reg & BIT(i)) == 0)
-                       continue;
-
-               link = mt7530_mdio_r32(gsw, MT7530_PMSR_P(i)) & 0x1;
-
-               if (link == eth->link[i])
-                       continue;
-
-               eth->link[i] = link;
-               if (link)
-                       netdev_info(*eth->netdev,
-                                   "port %d link up\n", i);
-               else
-                       netdev_info(*eth->netdev,
-                                   "port %d link down\n", i);
-       }
-
-       mt7530_mdio_w32(gsw, MT7530_SYS_INT_STS, 0x1f);
-
-       return IRQ_HANDLED;
-}
-
-static void mt7621_hw_init(struct mtk_eth *eth, struct mt7620_gsw *gsw,
-                          struct device_node *np)
-{
-       u32 i;
-       u32 val;
-
-       /* hardware reset the switch */
-       mtk_reset(eth, RST_CTRL_MCM);
-       mdelay(10);
-
-       /* reduce RGMII2 PAD driving strength */
-       rt_sysc_m32(MT7621_MDIO_DRV_MASK, 0, SYSC_PAD_RGMII2_MDIO);
-
-       /* gpio mux - RGMII1=Normal mode */
-       rt_sysc_m32(BIT(14), 0, SYSC_GPIO_MODE);
-
-       /* set GMAC1 RGMII mode */
-       rt_sysc_m32(MT7621_GE1_MODE_MASK, 0, SYSC_REG_CFG1);
-
-       /* enable MDIO to control MT7530 */
-       rt_sysc_m32(3 << 12, 0, SYSC_GPIO_MODE);
-
-       /* turn off all PHYs */
-       for (i = 0; i <= 4; i++) {
-               val = _mt7620_mii_read(gsw, i, 0x0);
-               val |= BIT(11);
-               _mt7620_mii_write(gsw, i, 0x0, val);
-       }
-
-       /* reset the switch */
-       mt7530_mdio_w32(gsw, MT7530_SYS_CTRL,
-                       SYS_CTRL_SW_RST | SYS_CTRL_REG_RST);
-       usleep_range(10, 20);
-
-       if ((rt_sysc_r32(SYSC_REG_CHIP_REV_ID) & 0xFFFF) == 0x0101) {
-               /* GE1, Force 1000M/FD, FC ON, MAX_RX_LENGTH 1536 */
-               mtk_switch_w32(gsw, MAC_MCR_FIXED_LINK, MTK_MAC_P2_MCR);
-               mt7530_mdio_w32(gsw, MT7530_PMCR_P(6), PMCR_FIXED_LINK);
-       } else {
-               /* GE1, Force 1000M/FD, FC ON, MAX_RX_LENGTH 1536 */
-               mtk_switch_w32(gsw, MAC_MCR_FIXED_LINK_FC, MTK_MAC_P1_MCR);
-               mt7530_mdio_w32(gsw, MT7530_PMCR_P(6), PMCR_FIXED_LINK_FC);
-       }
-
-       /* GE2, Link down */
-       mtk_switch_w32(gsw, MAC_MCR_FORCE_MODE, MTK_MAC_P2_MCR);
-
-       /* Enable Port 6, P5 as GMAC5, P5 disable */
-       val = mt7530_mdio_r32(gsw, MT7530_MHWTRAP);
-       /* Enable Port 6 */
-       val &= ~MHWTRAP_P6_DIS;
-       /* Disable Port 5 */
-       val |= MHWTRAP_P5_DIS;
-       /* manual override of HW-Trap */
-       val |= MHWTRAP_MANUAL;
-       mt7530_mdio_w32(gsw, MT7530_MHWTRAP, val);
-
-       val = rt_sysc_r32(SYSC_REG_CFG);
-       val = (val >> MT7621_XTAL_SHIFT) & MT7621_XTAL_MASK;
-       if (val < MT7621_XTAL_25 && val >= MT7621_XTAL_40) {
-               /* 40Mhz */
-
-               /* disable MT7530 core clock */
-               _mt7620_mii_write(gsw, 0, 13, 0x1f);
-               _mt7620_mii_write(gsw, 0, 14, 0x410);
-               _mt7620_mii_write(gsw, 0, 13, 0x401f);
-               _mt7620_mii_write(gsw, 0, 14, 0x0);
-
-               /* disable MT7530 PLL */
-               _mt7620_mii_write(gsw, 0, 13, 0x1f);
-               _mt7620_mii_write(gsw, 0, 14, 0x40d);
-               _mt7620_mii_write(gsw, 0, 13, 0x401f);
-               _mt7620_mii_write(gsw, 0, 14, 0x2020);
-
-               /* for MT7530 core clock = 500Mhz */
-               _mt7620_mii_write(gsw, 0, 13, 0x1f);
-               _mt7620_mii_write(gsw, 0, 14, 0x40e);
-               _mt7620_mii_write(gsw, 0, 13, 0x401f);
-               _mt7620_mii_write(gsw, 0, 14, 0x119);
-
-               /* enable MT7530 PLL */
-               _mt7620_mii_write(gsw, 0, 13, 0x1f);
-               _mt7620_mii_write(gsw, 0, 14, 0x40d);
-               _mt7620_mii_write(gsw, 0, 13, 0x401f);
-               _mt7620_mii_write(gsw, 0, 14, 0x2820);
-
-               usleep_range(20, 40);
-
-               /* enable MT7530 core clock */
-               _mt7620_mii_write(gsw, 0, 13, 0x1f);
-               _mt7620_mii_write(gsw, 0, 14, 0x410);
-               _mt7620_mii_write(gsw, 0, 13, 0x401f);
-       }
-
-       /* RGMII */
-       _mt7620_mii_write(gsw, 0, 14, 0x1);
-
-       /* set MT7530 central align */
-       mt7530_mdio_m32(gsw, BIT(0), P6ECR_INTF_MODE_RGMII, MT7530_P6ECR);
-       mt7530_mdio_m32(gsw, TRGMII_TXCTRL_TXC_INV, 0,
-                       MT7530_TRGMII_TXCTRL);
-       mt7530_mdio_w32(gsw, MT7530_TRGMII_TCK_CTRL, 0x855);
-
-       /* delay setting for 10/1000M */
-       mt7530_mdio_w32(gsw, MT7530_P5RGMIIRXCR,
-                       P5RGMIIRXCR_C_ALIGN | P5RGMIIRXCR_DELAY_2);
-       mt7530_mdio_w32(gsw, MT7530_P5RGMIITXCR, 0x14);
-
-       /* lower TX driving */
-       mt7530_mdio_w32(gsw, MT7530_TRGMII_TD0_ODT, 0x44);
-       mt7530_mdio_w32(gsw, MT7530_TRGMII_TD1_ODT, 0x44);
-       mt7530_mdio_w32(gsw, MT7530_TRGMII_TD2_ODT, 0x44);
-       mt7530_mdio_w32(gsw, MT7530_TRGMII_TD3_ODT, 0x44);
-       mt7530_mdio_w32(gsw, MT7530_TRGMII_TD4_ODT, 0x44);
-       mt7530_mdio_w32(gsw, MT7530_TRGMII_TD5_ODT, 0x44);
-
-       /* turn on all PHYs */
-       for (i = 0; i <= 4; i++) {
-               val = _mt7620_mii_read(gsw, i, 0);
-               val &= ~BIT(11);
-               _mt7620_mii_write(gsw, i, 0, val);
-       }
-
-#define MT7530_NUM_PORTS 8
-#define REG_ESW_PORT_PCR(x)    (0x2004 | ((x) << 8))
-#define REG_ESW_PORT_PVC(x)    (0x2010 | ((x) << 8))
-#define REG_ESW_PORT_PPBV1(x)  (0x2014 | ((x) << 8))
-#define MT7530_CPU_PORT                6
-
-       /* This is copied from mt7530_apply_config in libreCMC driver */
-       {
-               int i;
-
-               for (i = 0; i < MT7530_NUM_PORTS; i++)
-                       mt7530_mdio_w32(gsw, REG_ESW_PORT_PCR(i), 0x00400000);
-
-               mt7530_mdio_w32(gsw, REG_ESW_PORT_PCR(MT7530_CPU_PORT),
-                               0x00ff0000);
-
-               for (i = 0; i < MT7530_NUM_PORTS; i++)
-                       mt7530_mdio_w32(gsw, REG_ESW_PORT_PVC(i), 0x810000c0);
-       }
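-       /* Decoding the magic values above is an assumption borrowed from
-        * the mainline mt7530 DSA driver's register layout: PCR bits 23:16
-        * form the port matrix, so 0x00400000 lets a user port talk only
-        * to port 6 (the CPU port) while 0x00ff0000 lets the CPU port
-        * reach all eight ports; PVC 0x810000c0 sets S-tag TPID 0x8100
-        * with the "transparent" VLAN attribute.
-        */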
-
-       /* enable irq */
-       mt7530_mdio_m32(gsw, 0, 3 << 16, MT7530_TOP_SIG_CTRL);
-       mt7530_mdio_w32(gsw, MT7530_SYS_INT_EN, 0x1f);
-}
-
-static const struct of_device_id mediatek_gsw_match[] = {
-       { .compatible = "mediatek,mt7621-gsw" },
-       {},
-};
-MODULE_DEVICE_TABLE(of, mediatek_gsw_match);
-
-int mtk_gsw_init(struct mtk_eth *eth)
-{
-       struct device_node *np = eth->switch_np;
-       struct platform_device *pdev = of_find_device_by_node(np);
-       struct mt7620_gsw *gsw;
-       int err;
-
-       if (!pdev)
-               return -ENODEV;
-
-       if (!of_device_is_compatible(np, mediatek_gsw_match->compatible))
-               return -EINVAL;
-
-       gsw = platform_get_drvdata(pdev);
-       eth->sw_priv = gsw;
-
-       if (!gsw->irq)
-               return -EINVAL;
-
-       err = request_irq(gsw->irq, gsw_interrupt_mt7621, 0,
-                         "gsw", eth);
-       if (err) {
-               dev_err(gsw->dev, "failed to request gsw irq: %d\n", err);
-               return err;
-       }
-       disable_irq(gsw->irq);
-
-       mt7621_hw_init(eth, gsw, np);
-
-       enable_irq(gsw->irq);
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(mtk_gsw_init);
-
-static int mt7621_gsw_probe(struct platform_device *pdev)
-{
-       struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       struct mt7620_gsw *gsw;
-
-       gsw = devm_kzalloc(&pdev->dev, sizeof(struct mt7620_gsw), GFP_KERNEL);
-       if (!gsw)
-               return -ENOMEM;
-
-       gsw->base = devm_ioremap_resource(&pdev->dev, res);
-       if (IS_ERR(gsw->base))
-               return PTR_ERR(gsw->base);
-
-       gsw->dev = &pdev->dev;
-       gsw->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
-
-       platform_set_drvdata(pdev, gsw);
-
-       return 0;
-}
-
-static int mt7621_gsw_remove(struct platform_device *pdev)
-{
-       platform_set_drvdata(pdev, NULL);
-
-       return 0;
-}
-
-static struct platform_driver gsw_driver = {
-       .probe = mt7621_gsw_probe,
-       .remove = mt7621_gsw_remove,
-       .driver = {
-               .name = "mt7621-gsw",
-               .of_match_table = mediatek_gsw_match,
-       },
-};
-
-module_platform_driver(gsw_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
-MODULE_DESCRIPTION("GBit switch driver for Mediatek MT7621 SoC");
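[Annotation] The deleted gsw driver splits bring-up in two stages: mt7621_gsw_probe()
only maps the registers and parses the IRQ, while the ethernet driver later calls
mtk_gsw_init() to run the heavy mt7621_hw_init() sequence. A minimal caller sketch,
using only the API removed above (the error handling is illustrative, not original):

        int err;

        /* eth->switch_np must reference the "mediatek,mt7621-gsw" node */
        err = mtk_gsw_init(eth);
        if (err)
                dev_err(eth->dev, "gsw init failed: %d\n", err);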
diff --git a/drivers/staging/mt7621-eth/mdio.c b/drivers/staging/mt7621-eth/mdio.c
deleted file mode 100644 (file)
index 5fea6a4..0000000
+++ /dev/null
@@ -1,275 +0,0 @@
-/*   This program is free software; you can redistribute it and/or modify
- *   it under the terms of the GNU General Public License as published by
- *   the Free Software Foundation; version 2 of the License
- *
- *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/phy.h>
-#include <linux/of_net.h>
-#include <linux/of_mdio.h>
-
-#include "mtk_eth_soc.h"
-#include "mdio.h"
-
-static int mtk_mdio_reset(struct mii_bus *bus)
-{
-       /* TODO */
-       return 0;
-}
-
-static void mtk_phy_link_adjust(struct net_device *dev)
-{
-       struct mtk_eth *eth = netdev_priv(dev);
-       unsigned long flags;
-       int i;
-
-       spin_lock_irqsave(&eth->phy->lock, flags);
-       for (i = 0; i < 8; i++) {
-               if (eth->phy->phy_node[i]) {
-                       struct phy_device *phydev = eth->phy->phy[i];
-                       int status_change = 0;
-
-                       if (phydev->link)
-                               if (eth->phy->duplex[i] != phydev->duplex ||
-                                   eth->phy->speed[i] != phydev->speed)
-                                       status_change = 1;
-
-                       if (phydev->link != eth->link[i])
-                               status_change = 1;
-
-                       switch (phydev->speed) {
-                       case SPEED_1000:
-                       case SPEED_100:
-                       case SPEED_10:
-                               eth->link[i] = phydev->link;
-                               eth->phy->duplex[i] = phydev->duplex;
-                               eth->phy->speed[i] = phydev->speed;
-
-                               if (status_change &&
-                                   eth->soc->mdio_adjust_link)
-                                       eth->soc->mdio_adjust_link(eth, i);
-                               break;
-                       }
-               }
-       }
-       spin_unlock_irqrestore(&eth->phy->lock, flags);
-}
-
-int mtk_connect_phy_node(struct mtk_eth *eth, struct mtk_mac *mac,
-                        struct device_node *phy_node)
-{
-       const __be32 *_port = NULL;
-       struct phy_device *phydev;
-       int phy_mode, port;
-
-       _port = of_get_property(phy_node, "reg", NULL);
-
-       if (!_port || (be32_to_cpu(*_port) >= 0x20)) {
-               pr_err("%pOFn: invalid port id\n", phy_node);
-               return -EINVAL;
-       }
-       port = be32_to_cpu(*_port);
-       phy_mode = of_get_phy_mode(phy_node);
-       if (phy_mode < 0) {
-               dev_err(eth->dev, "incorrect phy-mode %d\n", phy_mode);
-               eth->phy->phy_node[port] = NULL;
-               return -EINVAL;
-       }
-
-       phydev = of_phy_connect(eth->netdev[mac->id], phy_node,
-                               mtk_phy_link_adjust, 0, phy_mode);
-       if (!phydev) {
-               dev_err(eth->dev, "could not connect to PHY\n");
-               eth->phy->phy_node[port] = NULL;
-               return -ENODEV;
-       }
-
-       phydev->supported &= PHY_1000BT_FEATURES;
-       phydev->advertising = phydev->supported;
-
-       dev_info(eth->dev,
-                "connected port %d to PHY at %s [uid=%08x, driver=%s]\n",
-                port, phydev_name(phydev), phydev->phy_id,
-                phydev->drv->name);
-
-       eth->phy->phy[port] = phydev;
-       eth->link[port] = 0;
-
-       return 0;
-}
-
-static void phy_init(struct mtk_eth *eth, struct mtk_mac *mac,
-                    struct phy_device *phy)
-{
-       phy_attach(eth->netdev[mac->id], phydev_name(phy),
-                  PHY_INTERFACE_MODE_MII);
-
-       phy->autoneg = AUTONEG_ENABLE;
-       phy->speed = 0;
-       phy->duplex = 0;
-       phy_set_max_speed(phy, SPEED_100);
-       phy->advertising = phy->supported | ADVERTISED_Autoneg;
-
-       phy_start_aneg(phy);
-}
-
-static int mtk_phy_connect(struct mtk_mac *mac)
-{
-       struct mtk_eth *eth = mac->hw;
-       int i;
-
-       for (i = 0; i < 8; i++) {
-               if (eth->phy->phy_node[i]) {
-                       if (!mac->phy_dev) {
-                               mac->phy_dev = eth->phy->phy[i];
-                               mac->phy_flags = MTK_PHY_FLAG_PORT;
-                       }
-               } else if (eth->mii_bus) {
-                       struct phy_device *phy;
-
-                       phy = mdiobus_get_phy(eth->mii_bus, i);
-                       if (phy) {
-                               phy_init(eth, mac, phy);
-                               if (!mac->phy_dev) {
-                                       mac->phy_dev = phy;
-                                       mac->phy_flags = MTK_PHY_FLAG_ATTACH;
-                               }
-                       }
-               }
-       }
-
-       return 0;
-}
-
-static void mtk_phy_disconnect(struct mtk_mac *mac)
-{
-       struct mtk_eth *eth = mac->hw;
-       unsigned long flags;
-       int i;
-
-       for (i = 0; i < 8; i++)
-               if (eth->phy->phy_fixed[i]) {
-                       spin_lock_irqsave(&eth->phy->lock, flags);
-                       eth->link[i] = 0;
-                       if (eth->soc->mdio_adjust_link)
-                               eth->soc->mdio_adjust_link(eth, i);
-                       spin_unlock_irqrestore(&eth->phy->lock, flags);
-               } else if (eth->phy->phy[i]) {
-                       phy_disconnect(eth->phy->phy[i]);
-               } else if (eth->mii_bus) {
-                       struct phy_device *phy =
-                               mdiobus_get_phy(eth->mii_bus, i);
-
-                       if (phy)
-                               phy_detach(phy);
-               }
-}
-
-static void mtk_phy_start(struct mtk_mac *mac)
-{
-       struct mtk_eth *eth = mac->hw;
-       unsigned long flags;
-       int i;
-
-       for (i = 0; i < 8; i++) {
-               if (eth->phy->phy_fixed[i]) {
-                       spin_lock_irqsave(&eth->phy->lock, flags);
-                       eth->link[i] = 1;
-                       if (eth->soc->mdio_adjust_link)
-                               eth->soc->mdio_adjust_link(eth, i);
-                       spin_unlock_irqrestore(&eth->phy->lock, flags);
-               } else if (eth->phy->phy[i]) {
-                       phy_start(eth->phy->phy[i]);
-               }
-       }
-}
-
-static void mtk_phy_stop(struct mtk_mac *mac)
-{
-       struct mtk_eth *eth = mac->hw;
-       unsigned long flags;
-       int i;
-
-       for (i = 0; i < 8; i++)
-               if (eth->phy->phy_fixed[i]) {
-                       spin_lock_irqsave(&eth->phy->lock, flags);
-                       eth->link[i] = 0;
-                       if (eth->soc->mdio_adjust_link)
-                               eth->soc->mdio_adjust_link(eth, i);
-                       spin_unlock_irqrestore(&eth->phy->lock, flags);
-               } else if (eth->phy->phy[i]) {
-                       phy_stop(eth->phy->phy[i]);
-               }
-}
-
-static struct mtk_phy phy_ralink = {
-       .connect = mtk_phy_connect,
-       .disconnect = mtk_phy_disconnect,
-       .start = mtk_phy_start,
-       .stop = mtk_phy_stop,
-};
-
-int mtk_mdio_init(struct mtk_eth *eth)
-{
-       struct device_node *mii_np;
-       int err;
-
-       if (!eth->soc->mdio_read || !eth->soc->mdio_write)
-               return 0;
-
-       spin_lock_init(&phy_ralink.lock);
-       eth->phy = &phy_ralink;
-
-       mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
-       if (!mii_np) {
-               dev_err(eth->dev, "no mdio-bus child node found\n");
-               return -ENODEV;
-       }
-
-       if (!of_device_is_available(mii_np)) {
-               err = 0;
-               goto err_put_node;
-       }
-
-       eth->mii_bus = mdiobus_alloc();
-       if (!eth->mii_bus) {
-               err = -ENOMEM;
-               goto err_put_node;
-       }
-
-       eth->mii_bus->name = "mdio";
-       eth->mii_bus->read = eth->soc->mdio_read;
-       eth->mii_bus->write = eth->soc->mdio_write;
-       eth->mii_bus->reset = mtk_mdio_reset;
-       eth->mii_bus->priv = eth;
-       eth->mii_bus->parent = eth->dev;
-
-       snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
-       err = of_mdiobus_register(eth->mii_bus, mii_np);
-       if (err)
-               goto err_free_bus;
-
-       return 0;
-
-err_free_bus:
-       mdiobus_free(eth->mii_bus);
-err_put_node:
-       of_node_put(mii_np);
-       eth->mii_bus = NULL;
-       return err;
-}
-
-void mtk_mdio_cleanup(struct mtk_eth *eth)
-{
-       if (!eth->mii_bus)
-               return;
-
-       mdiobus_unregister(eth->mii_bus);
-       of_node_put(eth->mii_bus->dev.of_node);
-       mdiobus_free(eth->mii_bus);
-}
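[Annotation] The deleted mdio.c follows the stock kernel mdiobus lifecycle; a
condensed sketch of the same pattern (my_mdio_read/my_mdio_write, priv and np are
placeholders, and error paths are elided):

        struct mii_bus *bus = mdiobus_alloc();

        bus->name = "mdio";
        bus->read = my_mdio_read;
        bus->write = my_mdio_write;
        bus->priv = priv;
        if (!of_mdiobus_register(bus, np)) {
                /* ... bus is live; PHYs are probed from the DT node ... */
                mdiobus_unregister(bus);
        }
        mdiobus_free(bus);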
diff --git a/drivers/staging/mt7621-eth/mdio.h b/drivers/staging/mt7621-eth/mdio.h
deleted file mode 100644 (file)
index b14e238..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-/*   This program is free software; you can redistribute it and/or modify
- *   it under the terms of the GNU General Public License as published by
- *   the Free Software Foundation; version 2 of the License
- *
- *   This program is distributed in the hope that it will be useful,
- *   but WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *   GNU General Public License for more details.
- *
- *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#ifndef _RALINK_MDIO_H__
-#define _RALINK_MDIO_H__
-
-#ifdef CONFIG_NET_MEDIATEK_MDIO
-int mtk_mdio_init(struct mtk_eth *eth);
-void mtk_mdio_cleanup(struct mtk_eth *eth);
-int mtk_connect_phy_node(struct mtk_eth *eth, struct mtk_mac *mac,
-                        struct device_node *phy_node);
-#else
-static inline int mtk_mdio_init(struct mtk_eth *eth) { return 0; }
-static inline void mtk_mdio_cleanup(struct mtk_eth *eth) {}
-/* stub so callers still compile when MDIO support is compiled out */
-static inline int mtk_connect_phy_node(struct mtk_eth *eth,
-                                      struct mtk_mac *mac,
-                                      struct device_node *phy_node)
-{
-       return -EINVAL;
-}
-#endif
-#endif
diff --git a/drivers/staging/mt7621-eth/mdio_mt7620.c b/drivers/staging/mt7621-eth/mdio_mt7620.c
deleted file mode 100644 (file)
index ced605c..0000000
+++ /dev/null
@@ -1,173 +0,0 @@
-/*   This program is free software; you can redistribute it and/or modify
- *   it under the terms of the GNU General Public License as published by
- *   the Free Software Foundation; version 2 of the License
- *
- *   This program is distributed in the hope that it will be useful,
- *   but WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *   GNU General Public License for more details.
- *
- *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-
-#include "mtk_eth_soc.h"
-#include "gsw_mt7620.h"
-#include "mdio.h"
-
-static int mt7620_mii_busy_wait(struct mt7620_gsw *gsw)
-{
-       unsigned long t_start = jiffies;
-
-       while (1) {
-               if (!(mtk_switch_r32(gsw,
-                                    gsw->piac_offset + MT7620_GSW_REG_PIAC) &
-                                    GSW_MDIO_ACCESS))
-                       return 0;
-               if (time_after(jiffies, t_start + GSW_REG_PHY_TIMEOUT))
-                       break;
-       }
-
-       dev_err(gsw->dev, "MDIO timeout\n");
-       return -1;
-}
-
-u32 _mt7620_mii_write(struct mt7620_gsw *gsw, u32 phy_addr,
-                     u32 phy_register, u32 write_data)
-{
-       if (mt7620_mii_busy_wait(gsw))
-               return -1;
-
-       write_data &= 0xffff;
-
-       mtk_switch_w32(gsw, GSW_MDIO_ACCESS | GSW_MDIO_START | GSW_MDIO_WRITE |
-               (phy_register << GSW_MDIO_REG_SHIFT) |
-               (phy_addr << GSW_MDIO_ADDR_SHIFT) | write_data,
-               MT7620_GSW_REG_PIAC);
-
-       if (mt7620_mii_busy_wait(gsw))
-               return -1;
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(_mt7620_mii_write);
-
-u32 _mt7620_mii_read(struct mt7620_gsw *gsw, int phy_addr, int phy_reg)
-{
-       u32 d;
-
-       if (mt7620_mii_busy_wait(gsw))
-               return 0xffff;
-
-       mtk_switch_w32(gsw, GSW_MDIO_ACCESS | GSW_MDIO_START | GSW_MDIO_READ |
-               (phy_reg << GSW_MDIO_REG_SHIFT) |
-               (phy_addr << GSW_MDIO_ADDR_SHIFT),
-               MT7620_GSW_REG_PIAC);
-
-       if (mt7620_mii_busy_wait(gsw))
-               return 0xffff;
-
-       d = mtk_switch_r32(gsw, MT7620_GSW_REG_PIAC) & 0xffff;
-
-       return d;
-}
-EXPORT_SYMBOL_GPL(_mt7620_mii_read);
-
-int mt7620_mdio_write(struct mii_bus *bus, int phy_addr, int phy_reg, u16 val)
-{
-       struct mtk_eth *eth = bus->priv;
-       struct mt7620_gsw *gsw = (struct mt7620_gsw *)eth->sw_priv;
-
-       return _mt7620_mii_write(gsw, phy_addr, phy_reg, val);
-}
-
-int mt7620_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
-{
-       struct mtk_eth *eth = bus->priv;
-       struct mt7620_gsw *gsw = (struct mt7620_gsw *)eth->sw_priv;
-
-       return _mt7620_mii_read(gsw, phy_addr, phy_reg);
-}
-
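-/* mt7530_mdio_w32/r32 below implement the MT7530's indirect 32-bit
- * register access over MDIO: a write to PHY 0x1f, register 0x1f selects
- * the register page (address bits 15:6); the low 16 data bits travel
- * through register (reg >> 2) & 0xf and the high 16 bits through
- * register 0x10, for writes and reads alike.
- */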
-void mt7530_mdio_w32(struct mt7620_gsw *gsw, u32 reg, u32 val)
-{
-       _mt7620_mii_write(gsw, 0x1f, 0x1f, (reg >> 6) & 0x3ff);
-       _mt7620_mii_write(gsw, 0x1f, (reg >> 2) & 0xf,  val & 0xffff);
-       _mt7620_mii_write(gsw, 0x1f, 0x10, val >> 16);
-}
-EXPORT_SYMBOL_GPL(mt7530_mdio_w32);
-
-u32 mt7530_mdio_r32(struct mt7620_gsw *gsw, u32 reg)
-{
-       u16 high, low;
-
-       _mt7620_mii_write(gsw, 0x1f, 0x1f, (reg >> 6) & 0x3ff);
-       low = _mt7620_mii_read(gsw, 0x1f, (reg >> 2) & 0xf);
-       high = _mt7620_mii_read(gsw, 0x1f, 0x10);
-
-       return (high << 16) | (low & 0xffff);
-}
-EXPORT_SYMBOL_GPL(mt7530_mdio_r32);
-
-void mt7530_mdio_m32(struct mt7620_gsw *gsw, u32 mask, u32 set, u32 reg)
-{
-       u32 val = mt7530_mdio_r32(gsw, reg);
-
-       val &= ~mask;
-       val |= set;
-       mt7530_mdio_w32(gsw, reg, val);
-}
-EXPORT_SYMBOL_GPL(mt7530_mdio_m32);
-
-static const char *mtk_speed_str(int speed)
-{
-       switch (speed) {
-       case 2:
-       case SPEED_1000:
-               return "1000";
-       case 1:
-       case SPEED_100:
-               return "100";
-       case 0:
-       case SPEED_10:
-               return "10";
-       }
-
-       return "? ";
-}
-
-int mt7620_has_carrier(struct mtk_eth *eth)
-{
-       struct mt7620_gsw *gsw = (struct mt7620_gsw *)eth->sw_priv;
-       int i;
-
-       for (i = 0; i < GSW_PORT6; i++)
-               if (mt7530_mdio_r32(gsw, GSW_REG_PORT_STATUS(i)) & 0x1)
-                       return 1;
-       return 0;
-}
-
-void mt7620_print_link_state(struct mtk_eth *eth, int port, int link,
-                            int speed, int duplex)
-{
-       struct mt7620_gsw *gsw = eth->sw_priv;
-
-       if (link)
-               dev_info(gsw->dev, "port %d link up (%sMbps/%s duplex)\n",
-                        port, mtk_speed_str(speed),
-                        (duplex) ? "Full" : "Half");
-       else
-               dev_info(gsw->dev, "port %d link down\n", port);
-}
-
-void mt7620_mdio_link_adjust(struct mtk_eth *eth, int port)
-{
-       mt7620_print_link_state(eth, port, eth->link[port],
-                               eth->phy->speed[port],
-                               (eth->phy->duplex[port] == DUPLEX_FULL));
-}
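[Annotation] Every switch and PHY access in the file above funnels through the GSW
PIAC register: poll until the previous MDIO frame finishes, write one command word,
poll again, then (for reads) pull the data back out of the same register. A condensed
read sketch, mirroring _mt7620_mii_read() above:

        if (mt7620_mii_busy_wait(gsw))          /* bus must be idle */
                return 0xffff;
        mtk_switch_w32(gsw, GSW_MDIO_ACCESS | GSW_MDIO_START | GSW_MDIO_READ |
                       (phy_reg << GSW_MDIO_REG_SHIFT) |
                       (phy_addr << GSW_MDIO_ADDR_SHIFT),
                       MT7620_GSW_REG_PIAC);
        if (mt7620_mii_busy_wait(gsw))          /* frame must complete */
                return 0xffff;
        return mtk_switch_r32(gsw, MT7620_GSW_REG_PIAC) & 0xffff;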
diff --git a/drivers/staging/mt7621-eth/mtk_eth_soc.c b/drivers/staging/mt7621-eth/mtk_eth_soc.c
deleted file mode 100644 (file)
index 6027b19..0000000
+++ /dev/null
@@ -1,2176 +0,0 @@
-/*   This program is free software; you can redistribute it and/or modify
- *   it under the terms of the GNU General Public License as published by
- *   the Free Software Foundation; version 2 of the License
- *
- *   This program is distributed in the hope that it will be useful,
- *   but WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *   GNU General Public License for more details.
- *
- *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/dma-mapping.h>
-#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool.h>
-#include <linux/platform_device.h>
-#include <linux/of_device.h>
-#include <linux/mfd/syscon.h>
-#include <linux/clk.h>
-#include <linux/of_net.h>
-#include <linux/of_mdio.h>
-#include <linux/if_vlan.h>
-#include <linux/reset.h>
-#include <linux/tcp.h>
-#include <linux/io.h>
-#include <linux/bug.h>
-#include <linux/regmap.h>
-
-#include "mtk_eth_soc.h"
-#include "mdio.h"
-#include "ethtool.h"
-
-#define        MAX_RX_LENGTH           1536
-#define MTK_RX_ETH_HLEN                (VLAN_ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
-#define MTK_RX_HLEN            (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
-#define DMA_DUMMY_DESC         0xffffffff
-#define MTK_DEFAULT_MSG_ENABLE \
-               (NETIF_MSG_DRV | \
-               NETIF_MSG_PROBE | \
-               NETIF_MSG_LINK | \
-               NETIF_MSG_TIMER | \
-               NETIF_MSG_IFDOWN | \
-               NETIF_MSG_IFUP | \
-               NETIF_MSG_RX_ERR | \
-               NETIF_MSG_TX_ERR)
-
-#define TX_DMA_DESP2_DEF       (TX_DMA_LS0 | TX_DMA_DONE)
-#define NEXT_TX_DESP_IDX(X)    (((X) + 1) & (ring->tx_ring_size - 1))
-#define NEXT_RX_DESP_IDX(X)    (((X) + 1) & (ring->rx_ring_size - 1))
-
-#define SYSC_REG_RSTCTRL       0x34
-
-static int mtk_msg_level = -1;
-module_param_named(msg_level, mtk_msg_level, int, 0);
-MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
-
-static const u16 mtk_reg_table_default[MTK_REG_COUNT] = {
-       [MTK_REG_PDMA_GLO_CFG] = MTK_PDMA_GLO_CFG,
-       [MTK_REG_PDMA_RST_CFG] = MTK_PDMA_RST_CFG,
-       [MTK_REG_DLY_INT_CFG] = MTK_DLY_INT_CFG,
-       [MTK_REG_TX_BASE_PTR0] = MTK_TX_BASE_PTR0,
-       [MTK_REG_TX_MAX_CNT0] = MTK_TX_MAX_CNT0,
-       [MTK_REG_TX_CTX_IDX0] = MTK_TX_CTX_IDX0,
-       [MTK_REG_TX_DTX_IDX0] = MTK_TX_DTX_IDX0,
-       [MTK_REG_RX_BASE_PTR0] = MTK_RX_BASE_PTR0,
-       [MTK_REG_RX_MAX_CNT0] = MTK_RX_MAX_CNT0,
-       [MTK_REG_RX_CALC_IDX0] = MTK_RX_CALC_IDX0,
-       [MTK_REG_RX_DRX_IDX0] = MTK_RX_DRX_IDX0,
-       [MTK_REG_MTK_INT_ENABLE] = MTK_INT_ENABLE,
-       [MTK_REG_MTK_INT_STATUS] = MTK_INT_STATUS,
-       [MTK_REG_MTK_DMA_VID_BASE] = MTK_DMA_VID0,
-       [MTK_REG_MTK_COUNTER_BASE] = MTK_GDMA1_TX_GBCNT,
-       [MTK_REG_MTK_RST_GL] = MTK_RST_GL,
-};
-
-static const u16 *mtk_reg_table = mtk_reg_table_default;
-
-void mtk_w32(struct mtk_eth *eth, u32 val, unsigned int reg)
-{
-       __raw_writel(val, eth->base + reg);
-}
-
-u32 mtk_r32(struct mtk_eth *eth, unsigned int reg)
-{
-       return __raw_readl(eth->base + reg);
-}
-
-static void mtk_reg_w32(struct mtk_eth *eth, u32 val, enum mtk_reg reg)
-{
-       mtk_w32(eth, val, mtk_reg_table[reg]);
-}
-
-static u32 mtk_reg_r32(struct mtk_eth *eth, enum mtk_reg reg)
-{
-       return mtk_r32(eth, mtk_reg_table[reg]);
-}
-
-/* These bits are also exposed via the reset-controller API, but the switch
- * and FE need to be brought out of reset at the exact same moment, and the
- * reset-controller API does not provide this feature yet. Do the reset
- * manually until the reset-controller API is able to do this.
- */
-void mtk_reset(struct mtk_eth *eth, u32 reset_bits)
-{
-       u32 val;
-
-       regmap_read(eth->ethsys, SYSC_REG_RSTCTRL, &val);
-       val |= reset_bits;
-       regmap_write(eth->ethsys, SYSC_REG_RSTCTRL, val);
-       usleep_range(10, 20);
-       val &= ~reset_bits;
-       regmap_write(eth->ethsys, SYSC_REG_RSTCTRL, val);
-       usleep_range(10, 20);
-}
-EXPORT_SYMBOL(mtk_reset);
-
-static inline void mtk_irq_ack(struct mtk_eth *eth, u32 mask)
-{
-       if (eth->soc->dma_type & MTK_PDMA)
-               mtk_reg_w32(eth, mask, MTK_REG_MTK_INT_STATUS);
-       if (eth->soc->dma_type & MTK_QDMA)
-               mtk_w32(eth, mask, MTK_QMTK_INT_STATUS);
-}
-
-static inline u32 mtk_irq_pending(struct mtk_eth *eth)
-{
-       u32 status = 0;
-
-       if (eth->soc->dma_type & MTK_PDMA)
-               status |= mtk_reg_r32(eth, MTK_REG_MTK_INT_STATUS);
-       if (eth->soc->dma_type & MTK_QDMA)
-               status |= mtk_r32(eth, MTK_QMTK_INT_STATUS);
-
-       return status;
-}
-
-static void mtk_irq_ack_status(struct mtk_eth *eth, u32 mask)
-{
-       u32 status_reg = MTK_REG_MTK_INT_STATUS;
-
-       if (mtk_reg_table[MTK_REG_MTK_INT_STATUS2])
-               status_reg = MTK_REG_MTK_INT_STATUS2;
-
-       mtk_reg_w32(eth, mask, status_reg);
-}
-
-static u32 mtk_irq_pending_status(struct mtk_eth *eth)
-{
-       u32 status_reg = MTK_REG_MTK_INT_STATUS;
-
-       if (mtk_reg_table[MTK_REG_MTK_INT_STATUS2])
-               status_reg = MTK_REG_MTK_INT_STATUS2;
-
-       return mtk_reg_r32(eth, status_reg);
-}
-
-static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask)
-{
-       u32 val;
-
-       if (eth->soc->dma_type & MTK_PDMA) {
-               val = mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
-               mtk_reg_w32(eth, val & ~mask, MTK_REG_MTK_INT_ENABLE);
-               /* flush write */
-               mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
-       }
-       if (eth->soc->dma_type & MTK_QDMA) {
-               val = mtk_r32(eth, MTK_QMTK_INT_ENABLE);
-               mtk_w32(eth, val & ~mask, MTK_QMTK_INT_ENABLE);
-               /* flush write */
-               mtk_r32(eth, MTK_QMTK_INT_ENABLE);
-       }
-}
-
-static inline void mtk_irq_enable(struct mtk_eth *eth, u32 mask)
-{
-       u32 val;
-
-       if (eth->soc->dma_type & MTK_PDMA) {
-               val = mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
-               mtk_reg_w32(eth, val | mask, MTK_REG_MTK_INT_ENABLE);
-               /* flush write */
-               mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
-       }
-       if (eth->soc->dma_type & MTK_QDMA) {
-               val = mtk_r32(eth, MTK_QMTK_INT_ENABLE);
-               mtk_w32(eth, val | mask, MTK_QMTK_INT_ENABLE);
-               /* flush write */
-               mtk_r32(eth, MTK_QMTK_INT_ENABLE);
-       }
-}
-
-static inline u32 mtk_irq_enabled(struct mtk_eth *eth)
-{
-       u32 enabled = 0;
-
-       if (eth->soc->dma_type & MTK_PDMA)
-               enabled |= mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
-       if (eth->soc->dma_type & MTK_QDMA)
-               enabled |= mtk_r32(eth, MTK_QMTK_INT_ENABLE);
-
-       return enabled;
-}
-
-static inline void mtk_hw_set_macaddr(struct mtk_mac *mac,
-                                     unsigned char *macaddr)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&mac->hw->page_lock, flags);
-       mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1], MTK_GDMA1_MAC_ADRH);
-       mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
-               (macaddr[4] << 8) | macaddr[5],
-               MTK_GDMA1_MAC_ADRL);
-       spin_unlock_irqrestore(&mac->hw->page_lock, flags);
-}
-
-static int mtk_set_mac_address(struct net_device *dev, void *p)
-{
-       int ret = eth_mac_addr(dev, p);
-       struct mtk_mac *mac = netdev_priv(dev);
-       struct mtk_eth *eth = mac->hw;
-
-       if (ret)
-               return ret;
-
-       if (eth->soc->set_mac)
-               eth->soc->set_mac(mac, dev->dev_addr);
-       else
-               mtk_hw_set_macaddr(mac, dev->dev_addr);
-
-       return 0;
-}
-
-static inline int mtk_max_frag_size(int mtu)
-{
-       /* make sure buf_size will be at least MAX_RX_LENGTH */
-       if (mtu + MTK_RX_ETH_HLEN < MAX_RX_LENGTH)
-               mtu = MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
-
-       return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
-               SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-}
-
-static inline int mtk_max_buf_size(int frag_size)
-{
-       int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
-                      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-
-       WARN_ON(buf_size < MAX_RX_LENGTH);
-
-       return buf_size;
-}
-
-static inline void mtk_get_rxd(struct mtk_rx_dma *rxd,
-                              struct mtk_rx_dma *dma_rxd)
-{
-       rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
-       rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
-       rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
-       rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
-}
-
-static inline void mtk_set_txd_pdma(struct mtk_tx_dma *txd,
-                                   struct mtk_tx_dma *dma_txd)
-{
-       WRITE_ONCE(dma_txd->txd1, txd->txd1);
-       WRITE_ONCE(dma_txd->txd3, txd->txd3);
-       WRITE_ONCE(dma_txd->txd4, txd->txd4);
-       /* clean dma done flag last */
-       WRITE_ONCE(dma_txd->txd2, txd->txd2);
-}
-
-static void mtk_clean_rx(struct mtk_eth *eth, struct mtk_rx_ring *ring)
-{
-       int i;
-
-       if (ring->rx_data && ring->rx_dma) {
-               for (i = 0; i < ring->rx_ring_size; i++) {
-                       if (!ring->rx_data[i])
-                               continue;
-                       if (!ring->rx_dma[i].rxd1)
-                               continue;
-                       dma_unmap_single(eth->dev,
-                                        ring->rx_dma[i].rxd1,
-                                        ring->rx_buf_size,
-                                        DMA_FROM_DEVICE);
-                       skb_free_frag(ring->rx_data[i]);
-               }
-               kfree(ring->rx_data);
-               ring->rx_data = NULL;
-       }
-
-       if (ring->rx_dma) {
-               dma_free_coherent(eth->dev,
-                                 ring->rx_ring_size * sizeof(*ring->rx_dma),
-                                 ring->rx_dma,
-                                 ring->rx_phys);
-               ring->rx_dma = NULL;
-       }
-}
-
-static int mtk_dma_rx_alloc(struct mtk_eth *eth, struct mtk_rx_ring *ring)
-{
-       int i, pad = 0;
-
-       ring->frag_size = mtk_max_frag_size(ETH_DATA_LEN);
-       ring->rx_buf_size = mtk_max_buf_size(ring->frag_size);
-       ring->rx_ring_size = eth->soc->dma_ring_size;
-       ring->rx_data = kcalloc(ring->rx_ring_size, sizeof(*ring->rx_data),
-                               GFP_KERNEL);
-       if (!ring->rx_data)
-               goto no_rx_mem;
-
-       for (i = 0; i < ring->rx_ring_size; i++) {
-               ring->rx_data[i] = netdev_alloc_frag(ring->frag_size);
-               if (!ring->rx_data[i])
-                       goto no_rx_mem;
-       }
-
-       ring->rx_dma =
-               dma_alloc_coherent(eth->dev,
-                                  ring->rx_ring_size * sizeof(*ring->rx_dma),
-                                  &ring->rx_phys, GFP_ATOMIC | __GFP_ZERO);
-       if (!ring->rx_dma)
-               goto no_rx_mem;
-
-       if (!eth->soc->rx_2b_offset)
-               pad = NET_IP_ALIGN;
-
-       for (i = 0; i < ring->rx_ring_size; i++) {
-               dma_addr_t dma_addr = dma_map_single(eth->dev,
-                               ring->rx_data[i] + NET_SKB_PAD + pad,
-                               ring->rx_buf_size,
-                               DMA_FROM_DEVICE);
-               if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
-                       goto no_rx_mem;
-               ring->rx_dma[i].rxd1 = (unsigned int)dma_addr;
-
-               if (eth->soc->rx_sg_dma)
-                       ring->rx_dma[i].rxd2 = RX_DMA_PLEN0(ring->rx_buf_size);
-               else
-                       ring->rx_dma[i].rxd2 = RX_DMA_LSO;
-       }
-       ring->rx_calc_idx = ring->rx_ring_size - 1;
-       /* make sure that all changes to the dma ring are flushed before we
-        * continue
-        */
-       wmb();
-
-       return 0;
-
-no_rx_mem:
-       return -ENOMEM;
-}
-
-static void mtk_txd_unmap(struct device *dev, struct mtk_tx_buf *tx_buf)
-{
-       if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
-               dma_unmap_single(dev,
-                                dma_unmap_addr(tx_buf, dma_addr0),
-                                dma_unmap_len(tx_buf, dma_len0),
-                                DMA_TO_DEVICE);
-       } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
-               dma_unmap_page(dev,
-                              dma_unmap_addr(tx_buf, dma_addr0),
-                              dma_unmap_len(tx_buf, dma_len0),
-                              DMA_TO_DEVICE);
-       }
-       if (tx_buf->flags & MTK_TX_FLAGS_PAGE1)
-               dma_unmap_page(dev,
-                              dma_unmap_addr(tx_buf, dma_addr1),
-                              dma_unmap_len(tx_buf, dma_len1),
-                              DMA_TO_DEVICE);
-
-       tx_buf->flags = 0;
-       if (tx_buf->skb && (tx_buf->skb != (struct sk_buff *)DMA_DUMMY_DESC))
-               dev_kfree_skb_any(tx_buf->skb);
-       tx_buf->skb = NULL;
-}
-
-static void mtk_pdma_tx_clean(struct mtk_eth *eth)
-{
-       struct mtk_tx_ring *ring = &eth->tx_ring;
-       int i;
-
-       if (ring->tx_buf) {
-               for (i = 0; i < ring->tx_ring_size; i++)
-                       mtk_txd_unmap(eth->dev, &ring->tx_buf[i]);
-               kfree(ring->tx_buf);
-               ring->tx_buf = NULL;
-       }
-
-       if (ring->tx_dma) {
-               dma_free_coherent(eth->dev,
-                                 ring->tx_ring_size * sizeof(*ring->tx_dma),
-                                 ring->tx_dma,
-                                 ring->tx_phys);
-               ring->tx_dma = NULL;
-       }
-}
-
-static void mtk_qdma_tx_clean(struct mtk_eth *eth)
-{
-       struct mtk_tx_ring *ring = &eth->tx_ring;
-       int i;
-
-       if (ring->tx_buf) {
-               for (i = 0; i < ring->tx_ring_size; i++)
-                       mtk_txd_unmap(eth->dev, &ring->tx_buf[i]);
-               kfree(ring->tx_buf);
-               ring->tx_buf = NULL;
-       }
-
-       if (ring->tx_dma) {
-               dma_free_coherent(eth->dev,
-                                 ring->tx_ring_size * sizeof(*ring->tx_dma),
-                                 ring->tx_dma,
-                                 ring->tx_phys);
-               ring->tx_dma = NULL;
-       }
-}
-
-void mtk_stats_update_mac(struct mtk_mac *mac)
-{
-       struct mtk_hw_stats *hw_stats = mac->hw_stats;
-       unsigned int base = mtk_reg_table[MTK_REG_MTK_COUNTER_BASE];
-       u64 stats;
-
-       base += hw_stats->reg_offset;
-
-       u64_stats_update_begin(&hw_stats->syncp);
-
-       if (mac->hw->soc->new_stats) {
-               hw_stats->rx_bytes += mtk_r32(mac->hw, base);
-               stats = mtk_r32(mac->hw, base + 0x04);
-               if (stats)
-                       hw_stats->rx_bytes += (stats << 32);
-               hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
-               hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
-               hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
-               hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
-               hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
-               hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
-               hw_stats->rx_flow_control_packets +=
-                                               mtk_r32(mac->hw, base + 0x24);
-               hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
-               hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
-               hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
-               stats = mtk_r32(mac->hw, base + 0x34);
-               if (stats)
-                       hw_stats->tx_bytes += (stats << 32);
-               hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
-       } else {
-               hw_stats->tx_bytes += mtk_r32(mac->hw, base);
-               hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x04);
-               hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x08);
-               hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x0c);
-               hw_stats->rx_bytes += mtk_r32(mac->hw, base + 0x20);
-               hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x24);
-               hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x28);
-               hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x2c);
-               hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x30);
-               hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x34);
-               hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x38);
-               hw_stats->rx_flow_control_packets +=
-                                               mtk_r32(mac->hw, base + 0x3c);
-       }
-
-       u64_stats_update_end(&hw_stats->syncp);
-}
-
-static void mtk_get_stats64(struct net_device *dev,
-                           struct rtnl_link_stats64 *storage)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       struct mtk_hw_stats *hw_stats = mac->hw_stats;
-       unsigned int base = mtk_reg_table[MTK_REG_MTK_COUNTER_BASE];
-       unsigned int start;
-
-       if (!base) {
-               netdev_stats_to_stats64(storage, &dev->stats);
-               return;
-       }
-
-       if (netif_running(dev) && netif_device_present(dev)) {
-               if (spin_trylock(&hw_stats->stats_lock)) {
-                       mtk_stats_update_mac(mac);
-                       spin_unlock(&hw_stats->stats_lock);
-               }
-       }
-
-       do {
-               start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
-               storage->rx_packets = hw_stats->rx_packets;
-               storage->tx_packets = hw_stats->tx_packets;
-               storage->rx_bytes = hw_stats->rx_bytes;
-               storage->tx_bytes = hw_stats->tx_bytes;
-               storage->collisions = hw_stats->tx_collisions;
-               storage->rx_length_errors = hw_stats->rx_short_errors +
-                       hw_stats->rx_long_errors;
-               storage->rx_over_errors = hw_stats->rx_overflow;
-               storage->rx_crc_errors = hw_stats->rx_fcs_errors;
-               storage->rx_errors = hw_stats->rx_checksum_errors;
-               storage->tx_aborted_errors = hw_stats->tx_skip;
-       } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));
-
-       storage->tx_errors = dev->stats.tx_errors;
-       storage->rx_dropped = dev->stats.rx_dropped;
-       storage->tx_dropped = dev->stats.tx_dropped;
-}
-
-static int mtk_vlan_rx_add_vid(struct net_device *dev,
-                              __be16 proto, u16 vid)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       struct mtk_eth *eth = mac->hw;
-       u32 idx = (vid & 0xf);
-       u32 vlan_cfg;
-
-       if (!((mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE]) &&
-             (dev->features & NETIF_F_HW_VLAN_CTAG_TX)))
-               return 0;
-
-       if (test_bit(idx, &eth->vlan_map)) {
-               netdev_warn(dev, "disable tx vlan offload\n");
-               dev->wanted_features &= ~NETIF_F_HW_VLAN_CTAG_TX;
-               netdev_update_features(dev);
-       } else {
-               vlan_cfg = mtk_r32(eth,
-                                  mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE] +
-                                  ((idx >> 1) << 2));
-               if (idx & 0x1) {
-                       vlan_cfg &= 0xffff;
-                       vlan_cfg |= (vid << 16);
-               } else {
-                       vlan_cfg &= 0xffff0000;
-                       vlan_cfg |= vid;
-               }
-               mtk_w32(eth,
-                       vlan_cfg, mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE] +
-                       ((idx >> 1) << 2));
-               set_bit(idx, &eth->vlan_map);
-       }
-
-       return 0;
-}
-
-static int mtk_vlan_rx_kill_vid(struct net_device *dev,
-                               __be16 proto, u16 vid)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       struct mtk_eth *eth = mac->hw;
-       u32 idx = (vid & 0xf);
-
-       if (!((mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE]) &&
-             (dev->features & NETIF_F_HW_VLAN_CTAG_TX)))
-               return 0;
-
-       clear_bit(idx, &eth->vlan_map);
-
-       return 0;
-}
-
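-/* tx_ring_size is a power of two, so masking the index difference with
- * (tx_ring_size - 1) below counts the in-flight descriptors correctly
- * even when the indices wrap; the free count is ring size minus that.
- */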
-static inline u32 mtk_pdma_empty_txd(struct mtk_tx_ring *ring)
-{
-       barrier();
-       return (u32)(ring->tx_ring_size -
-                    ((ring->tx_next_idx - ring->tx_free_idx) &
-                     (ring->tx_ring_size - 1)));
-}
-
-static int mtk_skb_padto(struct sk_buff *skb, struct mtk_eth *eth)
-{
-       unsigned int len;
-       int ret;
-
-       if (unlikely(skb->len >= VLAN_ETH_ZLEN))
-               return 0;
-
-       if (eth->soc->padding_64b && !eth->soc->padding_bug)
-               return 0;
-
-       if (skb_vlan_tag_present(skb))
-               len = ETH_ZLEN;
-       else if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
-               len = VLAN_ETH_ZLEN;
-       else if (!eth->soc->padding_64b)
-               len = ETH_ZLEN;
-       else
-               return 0;
-
-       if (skb->len >= len)
-               return 0;
-
-       ret = skb_pad(skb, len - skb->len);
-       if (ret < 0)
-               return ret;
-       skb->len = len;
-       skb_set_tail_pointer(skb, len);
-
-       return ret;
-}
-
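-/* One PDMA descriptor carries two buffers (txd1 with PLEN0 in txd2,
- * txd3 with PLEN1), so the fragment loop below alternates on the parity
- * of k: pieces with even k take the second slot of the current
- * descriptor, odd k moves on to a fresh descriptor's first slot, and
- * the final TX_DMA_LS0/LS1 flag records which slot the last piece
- * landed in.
- */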
-static int mtk_pdma_tx_map(struct sk_buff *skb, struct net_device *dev,
-                          int tx_num, struct mtk_tx_ring *ring, bool gso)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       struct mtk_eth *eth = mac->hw;
-       struct skb_frag_struct *frag;
-       struct mtk_tx_dma txd, *ptxd;
-       struct mtk_tx_buf *tx_buf;
-       int i, j, k, frag_size, frag_map_size, offset;
-       dma_addr_t mapped_addr;
-       unsigned int nr_frags;
-       u32 def_txd4;
-
-       if (mtk_skb_padto(skb, eth)) {
-               netif_warn(eth, tx_err, dev, "tx padding failed!\n");
-               return -1;
-       }
-
-       tx_buf = &ring->tx_buf[ring->tx_next_idx];
-       memset(tx_buf, 0, sizeof(*tx_buf));
-       memset(&txd, 0, sizeof(txd));
-       nr_frags = skb_shinfo(skb)->nr_frags;
-
-       /* init tx descriptor */
-       def_txd4 = eth->soc->txd4;
-       txd.txd4 = def_txd4;
-
-       if (eth->soc->mac_count > 1)
-               txd.txd4 |= (mac->id + 1) << TX_DMA_FPORT_SHIFT;
-
-       if (gso)
-               txd.txd4 |= TX_DMA_TSO;
-
-       /* TX Checksum offload */
-       if (skb->ip_summed == CHECKSUM_PARTIAL)
-               txd.txd4 |= TX_DMA_CHKSUM;
-
-       /* VLAN header offload */
-       if (skb_vlan_tag_present(skb)) {
-               u16 tag = skb_vlan_tag_get(skb);
-
-               txd.txd4 |= TX_DMA_INS_VLAN |
-                       ((tag >> VLAN_PRIO_SHIFT) << 4) |
-                       (tag & 0xF);
-       }
-
-       mapped_addr = dma_map_single(&dev->dev, skb->data,
-                                    skb_headlen(skb), DMA_TO_DEVICE);
-       if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
-               return -1;
-
-       txd.txd1 = mapped_addr;
-       txd.txd2 = TX_DMA_PLEN0(skb_headlen(skb));
-
-       tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
-       dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
-       dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));
-
-       /* TX SG offload */
-       j = ring->tx_next_idx;
-       k = 0;
-       for (i = 0; i < nr_frags; i++) {
-               offset = 0;
-               frag = &skb_shinfo(skb)->frags[i];
-               frag_size = skb_frag_size(frag);
-
-               while (frag_size > 0) {
-                       frag_map_size = min(frag_size, TX_DMA_BUF_LEN);
-                       mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset,
-                                                      frag_map_size,
-                                                      DMA_TO_DEVICE);
-                       if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
-                               goto err_dma;
-
-                       if (k & 0x1) {
-                               j = NEXT_TX_DESP_IDX(j);
-                               txd.txd1 = mapped_addr;
-                               txd.txd2 = TX_DMA_PLEN0(frag_map_size);
-                               txd.txd4 = def_txd4;
-
-                               tx_buf = &ring->tx_buf[j];
-                               memset(tx_buf, 0, sizeof(*tx_buf));
-
-                               tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
-                               dma_unmap_addr_set(tx_buf, dma_addr0,
-                                                  mapped_addr);
-                               dma_unmap_len_set(tx_buf, dma_len0,
-                                                 frag_map_size);
-                       } else {
-                               txd.txd3 = mapped_addr;
-                               txd.txd2 |= TX_DMA_PLEN1(frag_map_size);
-
-                               tx_buf->skb = (struct sk_buff *)DMA_DUMMY_DESC;
-                               tx_buf->flags |= MTK_TX_FLAGS_PAGE1;
-                               dma_unmap_addr_set(tx_buf, dma_addr1,
-                                                  mapped_addr);
-                               dma_unmap_len_set(tx_buf, dma_len1,
-                                                 frag_map_size);
-
-                               if (!((i == (nr_frags - 1)) &&
-                                     (frag_map_size == frag_size))) {
-                                       mtk_set_txd_pdma(&txd,
-                                                        &ring->tx_dma[j]);
-                                       memset(&txd, 0, sizeof(txd));
-                               }
-                       }
-                       frag_size -= frag_map_size;
-                       offset += frag_map_size;
-                       k++;
-               }
-       }
-
-       /* set last segment */
-       if (k & 0x1)
-               txd.txd2 |= TX_DMA_LS1;
-       else
-               txd.txd2 |= TX_DMA_LS0;
-       mtk_set_txd_pdma(&txd, &ring->tx_dma[j]);
-
-       /* store skb to cleanup */
-       tx_buf->skb = skb;
-
-       netdev_sent_queue(dev, skb->len);
-       skb_tx_timestamp(skb);
-
-       ring->tx_next_idx = NEXT_TX_DESP_IDX(j);
-       /* make sure that all changes to the dma ring are flushed before we
-        * continue
-        */
-       wmb();
-       atomic_set(&ring->tx_free_count, mtk_pdma_empty_txd(ring));
-
-       if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
-               mtk_reg_w32(eth, ring->tx_next_idx, MTK_REG_TX_CTX_IDX0);
-
-       return 0;
-
-err_dma:
-       j = ring->tx_next_idx;
-       for (i = 0; i < tx_num; i++) {
-               ptxd = &ring->tx_dma[j];
-               tx_buf = &ring->tx_buf[j];
-
-               /* unmap dma */
-               mtk_txd_unmap(&dev->dev, tx_buf);
-
-               ptxd->txd2 = TX_DMA_DESP2_DEF;
-               j = NEXT_TX_DESP_IDX(j);
-       }
-       /* make sure that all changes to the dma ring are flushed before we
-        * continue
-        */
-       wmb();
-       return -1;
-}
-
-/* the QDMA core needs scratch memory to be set up */
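-/* The scratch memory forms the QDMA free-descriptor queue: each
- * descriptor's txd1 points at one QDMA_PAGE_SIZE buffer and txd2 holds
- * the physical address of the next descriptor, building a singly linked
- * list whose head, tail and count are programmed into the
- * MTK_QDMA_FQ_* registers at the end of the function.
- */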
-static int mtk_init_fq_dma(struct mtk_eth *eth)
-{
-       dma_addr_t dma_addr, phy_ring_head, phy_ring_tail;
-       int cnt = eth->soc->dma_ring_size;
-       int i;
-
-       eth->scratch_ring = dma_alloc_coherent(eth->dev,
-                                              cnt * sizeof(struct mtk_tx_dma),
-                                              &phy_ring_head,
-                                              GFP_ATOMIC | __GFP_ZERO);
-       if (unlikely(!eth->scratch_ring))
-               return -ENOMEM;
-
-       eth->scratch_head = kcalloc(cnt, QDMA_PAGE_SIZE,
-                                   GFP_KERNEL);
-       if (unlikely(!eth->scratch_head))
-               return -ENOMEM;
-
-       dma_addr = dma_map_single(eth->dev,
-                                 eth->scratch_head, cnt * QDMA_PAGE_SIZE,
-                                 DMA_FROM_DEVICE);
-       if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
-               return -ENOMEM;
-
-       memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
-       phy_ring_tail = phy_ring_head + (sizeof(struct mtk_tx_dma) * (cnt - 1));
-
-       for (i = 0; i < cnt; i++) {
-               eth->scratch_ring[i].txd1 = (dma_addr + (i * QDMA_PAGE_SIZE));
-               if (i < cnt - 1)
-                       eth->scratch_ring[i].txd2 = (phy_ring_head +
-                               ((i + 1) * sizeof(struct mtk_tx_dma)));
-               eth->scratch_ring[i].txd3 = TX_QDMA_SDL(QDMA_PAGE_SIZE);
-       }
-
-       mtk_w32(eth, phy_ring_head, MTK_QDMA_FQ_HEAD);
-       mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
-       mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
-       mtk_w32(eth, QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
-
-       return 0;
-}
-
-static void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
-{
-       void *ret = ring->tx_dma;
-
-       return ret + (desc - ring->tx_phys);
-}
-
-static struct mtk_tx_dma *mtk_tx_next_qdma(struct mtk_tx_ring *ring,
-                                          struct mtk_tx_dma *txd)
-{
-       return mtk_qdma_phys_to_virt(ring, txd->txd2);
-}
-
-static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
-                                            struct mtk_tx_dma *txd)
-{
-       int idx = txd - ring->tx_dma;
-
-       return &ring->tx_buf[idx];
-}
-
-static int mtk_qdma_tx_map(struct sk_buff *skb, struct net_device *dev,
-                          int tx_num, struct mtk_tx_ring *ring, bool gso)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       struct mtk_eth *eth = mac->hw;
-       struct mtk_tx_dma *itxd, *txd;
-       struct mtk_tx_buf *tx_buf;
-       dma_addr_t mapped_addr;
-       unsigned int nr_frags;
-       int i, n_desc = 1;
-       u32 txd4 = eth->soc->txd4;
-
-       itxd = ring->tx_next_free;
-       if (itxd == ring->tx_last_free)
-               return -ENOMEM;
-
-       if (eth->soc->mac_count > 1)
-               txd4 |= (mac->id + 1) << TX_DMA_FPORT_SHIFT;
-
-       tx_buf = mtk_desc_to_tx_buf(ring, itxd);
-       memset(tx_buf, 0, sizeof(*tx_buf));
-
-       if (gso)
-               txd4 |= TX_DMA_TSO;
-
-       /* TX Checksum offload */
-       if (skb->ip_summed == CHECKSUM_PARTIAL)
-               txd4 |= TX_DMA_CHKSUM;
-
-       /* VLAN header offload */
-       if (skb_vlan_tag_present(skb))
-               txd4 |= TX_DMA_INS_VLAN_MT7621 | skb_vlan_tag_get(skb);
-
-       mapped_addr = dma_map_single(&dev->dev, skb->data,
-                                    skb_headlen(skb), DMA_TO_DEVICE);
-       if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
-               return -ENOMEM;
-
-       WRITE_ONCE(itxd->txd1, mapped_addr);
-       tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
-       dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
-       dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));
-
-       /* TX SG offload */
-       txd = itxd;
-       nr_frags = skb_shinfo(skb)->nr_frags;
-       for (i = 0; i < nr_frags; i++) {
-               struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
-               unsigned int offset = 0;
-               int frag_size = skb_frag_size(frag);
-
-               while (frag_size) {
-                       bool last_frag = false;
-                       unsigned int frag_map_size;
-
-                       txd = mtk_tx_next_qdma(ring, txd);
-                       if (txd == ring->tx_last_free)
-                               goto err_dma;
-
-                       n_desc++;
-                       frag_map_size = min(frag_size, TX_DMA_BUF_LEN);
-                       mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset,
-                                                      frag_map_size,
-                                                      DMA_TO_DEVICE);
-                       if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
-                               goto err_dma;
-
-                       if (i == nr_frags - 1 &&
-                           (frag_size - frag_map_size) == 0)
-                               last_frag = true;
-
-                       WRITE_ONCE(txd->txd1, mapped_addr);
-                       WRITE_ONCE(txd->txd3, (QDMA_TX_SWC |
-                                              TX_DMA_PLEN0(frag_map_size) |
-                                              last_frag * TX_DMA_LS0) |
-                                              mac->id);
-                       WRITE_ONCE(txd->txd4, 0);
-
-                       tx_buf->skb = (struct sk_buff *)DMA_DUMMY_DESC;
-                       tx_buf = mtk_desc_to_tx_buf(ring, txd);
-                       memset(tx_buf, 0, sizeof(*tx_buf));
-
-                       tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
-                       dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
-                       dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
-                       frag_size -= frag_map_size;
-                       offset += frag_map_size;
-               }
-       }
-
-       /* store skb to cleanup */
-       tx_buf->skb = skb;
-
-       WRITE_ONCE(itxd->txd4, txd4);
-       WRITE_ONCE(itxd->txd3, (QDMA_TX_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
-                               (!nr_frags * TX_DMA_LS0)));
-
-       netdev_sent_queue(dev, skb->len);
-       skb_tx_timestamp(skb);
-
-       ring->tx_next_free = mtk_tx_next_qdma(ring, txd);
-       atomic_sub(n_desc, &ring->tx_free_count);
-
-       /* make sure that all changes to the dma ring are flushed before we
-        * continue
-        */
-       wmb();
-
-       if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
-               mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
-
-       return 0;
-
-err_dma:
-       do {
-               tx_buf = mtk_desc_to_tx_buf(ring, txd);
-
-               /* unmap dma */
-               mtk_txd_unmap(&dev->dev, tx_buf);
-
-               itxd->txd3 = TX_DMA_DESP2_DEF;
-               itxd = mtk_tx_next_qdma(ring, itxd);
-       } while (itxd != txd);
-
-       return -ENOMEM;
-}
-
-static inline int mtk_cal_txd_req(struct sk_buff *skb)
-{
-       int i, nfrags;
-       struct skb_frag_struct *frag;
-
-       nfrags = 1;
-       if (skb_is_gso(skb)) {
-               for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-                       frag = &skb_shinfo(skb)->frags[i];
-                       nfrags += DIV_ROUND_UP(frag->size, TX_DMA_BUF_LEN);
-               }
-       } else {
-               nfrags += skb_shinfo(skb)->nr_frags;
-       }
-
-       return DIV_ROUND_UP(nfrags, 2);
-}
-
-static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       struct mtk_eth *eth = mac->hw;
-       struct mtk_tx_ring *ring = &eth->tx_ring;
-       struct net_device_stats *stats = &dev->stats;
-       int tx_num;
-       int len = skb->len;
-       bool gso = false;
-
-       tx_num = mtk_cal_txd_req(skb);
-       if (unlikely(atomic_read(&ring->tx_free_count) <= tx_num)) {
-               netif_stop_queue(dev);
-               netif_err(eth, tx_queued, dev,
-                         "Tx Ring full when queue awake!\n");
-               return NETDEV_TX_BUSY;
-       }
-
-       /* TSO: fill MSS info in tcp checksum field */
-       if (skb_is_gso(skb)) {
-               if (skb_cow_head(skb, 0)) {
-                       netif_warn(eth, tx_err, dev,
-                                  "GSO expand head fail.\n");
-                       goto drop;
-               }
-
-               if (skb_shinfo(skb)->gso_type &
-                               (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
-                       gso = true;
-                       tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
-               }
-       }
-
-       if (ring->tx_map(skb, dev, tx_num, ring, gso) < 0)
-               goto drop;
-
-       stats->tx_packets++;
-       stats->tx_bytes += len;
-
-       if (unlikely(atomic_read(&ring->tx_free_count) <= ring->tx_thresh)) {
-               netif_stop_queue(dev);
-               smp_mb();
-               if (unlikely(atomic_read(&ring->tx_free_count) >
-                            ring->tx_thresh))
-                       netif_wake_queue(dev);
-       }
-
-       return NETDEV_TX_OK;
-
-drop:
-       stats->tx_dropped++;
-       dev_kfree_skb(skb);
-       return NETDEV_TX_OK;
-}
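-
-/* The stop/wake sequence at the end of mtk_start_xmit() is the usual
- * lockless pattern: stop the queue first, then re-check the free count
- * after the barrier.  If the completion path freed descriptors in the
- * meantime, the re-check sees it and wakes the queue again, so the queue
- * cannot be left stopped while the ring still has room.
- */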
-
-static int mtk_poll_rx(struct napi_struct *napi, int budget,
-                      struct mtk_eth *eth, u32 rx_intr)
-{
-       struct mtk_soc_data *soc = eth->soc;
-       struct mtk_rx_ring *ring = &eth->rx_ring[0];
-       int idx = ring->rx_calc_idx;
-       u32 checksum_bit;
-       struct sk_buff *skb;
-       u8 *data, *new_data;
-       struct mtk_rx_dma *rxd, trxd;
-       int done = 0, pad;
-
-       if (eth->soc->hw_features & NETIF_F_RXCSUM)
-               checksum_bit = soc->checksum_bit;
-       else
-               checksum_bit = 0;
-
-       if (eth->soc->rx_2b_offset)
-               pad = 0;
-       else
-               pad = NET_IP_ALIGN;
-
-       while (done < budget) {
-               struct net_device *netdev;
-               unsigned int pktlen;
-               dma_addr_t dma_addr;
-               int mac = 0;
-
-               idx = NEXT_RX_DESP_IDX(idx);
-               rxd = &ring->rx_dma[idx];
-               data = ring->rx_data[idx];
-
-               mtk_get_rxd(&trxd, rxd);
-               if (!(trxd.rxd2 & RX_DMA_DONE))
-                       break;
-
-               /* find out which mac the packet comes from. values start at 1 */
-               if (eth->soc->mac_count > 1) {
-                       mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
-                             RX_DMA_FPORT_MASK;
-                       mac--;
-                       if (mac < 0 || mac >= eth->soc->mac_count)
-                               goto release_desc;
-               }
-
-               netdev = eth->netdev[mac];
-
-               /* drop the frame if this mac has no netdev attached */
-               if (unlikely(!netdev))
-                       goto release_desc;
-
-               /* alloc new buffer */
-               new_data = napi_alloc_frag(ring->frag_size);
-               if (unlikely(!new_data)) {
-                       netdev->stats.rx_dropped++;
-                       goto release_desc;
-               }
-               dma_addr = dma_map_single(&netdev->dev,
-                                         new_data + NET_SKB_PAD + pad,
-                                         ring->rx_buf_size,
-                                         DMA_FROM_DEVICE);
-               if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
-                       skb_free_frag(new_data);
-                       goto release_desc;
-               }
-
-               /* receive data */
-               skb = build_skb(data, ring->frag_size);
-               if (unlikely(!skb)) {
-                       put_page(virt_to_head_page(new_data));
-                       goto release_desc;
-               }
-               skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
-
-               dma_unmap_single(&netdev->dev, trxd.rxd1,
-                                ring->rx_buf_size, DMA_FROM_DEVICE);
-               pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
-               skb->dev = netdev;
-               skb_put(skb, pktlen);
-               if (trxd.rxd4 & checksum_bit)
-                       skb->ip_summed = CHECKSUM_UNNECESSARY;
-               else
-                       skb_checksum_none_assert(skb);
-               skb->protocol = eth_type_trans(skb, netdev);
-
-               netdev->stats.rx_packets++;
-               netdev->stats.rx_bytes += pktlen;
-
-               if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
-                   RX_DMA_VID(trxd.rxd3))
-                       __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
-                                              RX_DMA_VID(trxd.rxd3));
-               napi_gro_receive(napi, skb);
-
-               ring->rx_data[idx] = new_data;
-               rxd->rxd1 = (unsigned int)dma_addr;
-
-release_desc:
-               if (eth->soc->rx_sg_dma)
-                       rxd->rxd2 = RX_DMA_PLEN0(ring->rx_buf_size);
-               else
-                       rxd->rxd2 = RX_DMA_LSO;
-
-               ring->rx_calc_idx = idx;
-               /* make sure that all changes to the dma ring are flushed before
-                * we continue
-                */
-               wmb();
-               if (eth->soc->dma_type == MTK_QDMA)
-                       mtk_w32(eth, ring->rx_calc_idx, MTK_QRX_CRX_IDX0);
-               else
-                       mtk_reg_w32(eth, ring->rx_calc_idx,
-                                   MTK_REG_RX_CALC_IDX0);
-               done++;
-       }
-
-       if (done < budget)
-               mtk_irq_ack(eth, rx_intr);
-
-       return done;
-}
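-
-/* Note the refill ordering in the rx loop above: a replacement fragment is
- * allocated and dma-mapped before the received buffer is handed to the
- * stack, and on any failure the old buffer is simply given back to the
- * hardware via release_desc, so the ring never leaks a slot under memory
- * pressure.
- */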
-
-static int mtk_pdma_tx_poll(struct mtk_eth *eth, int budget, bool *tx_again)
-{
-       struct sk_buff *skb;
-       struct mtk_tx_buf *tx_buf;
-       int done = 0;
-       u32 idx, hwidx;
-       struct mtk_tx_ring *ring = &eth->tx_ring;
-       unsigned int bytes = 0;
-
-       idx = ring->tx_free_idx;
-       hwidx = mtk_reg_r32(eth, MTK_REG_TX_DTX_IDX0);
-
-       while ((idx != hwidx) && budget) {
-               tx_buf = &ring->tx_buf[idx];
-               skb = tx_buf->skb;
-
-               if (!skb)
-                       break;
-
-               if (skb != (struct sk_buff *)DMA_DUMMY_DESC) {
-                       bytes += skb->len;
-                       done++;
-                       budget--;
-               }
-               mtk_txd_unmap(eth->dev, tx_buf);
-               idx = NEXT_TX_DESP_IDX(idx);
-       }
-       ring->tx_free_idx = idx;
-       atomic_set(&ring->tx_free_count, mtk_pdma_empty_txd(ring));
-
-       /* re-read the hw index to catch tx packets that completed meanwhile */
-       if (idx != hwidx || idx != mtk_reg_r32(eth, MTK_REG_TX_DTX_IDX0))
-               *tx_again = true;
-
-       if (done)
-               netdev_completed_queue(eth->netdev[0], done, bytes);
-
-       return done;
-}
-
-static int mtk_qdma_tx_poll(struct mtk_eth *eth, int budget, bool *tx_again)
-{
-       struct mtk_tx_ring *ring = &eth->tx_ring;
-       struct mtk_tx_dma *desc;
-       struct sk_buff *skb;
-       struct mtk_tx_buf *tx_buf;
-       int total = 0, done[MTK_MAX_DEVS];
-       unsigned int bytes[MTK_MAX_DEVS];
-       u32 cpu, dma;
-       int i;
-
-       memset(done, 0, sizeof(done));
-       memset(bytes, 0, sizeof(bytes));
-
-       cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
-       dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
-
-       desc = mtk_qdma_phys_to_virt(ring, cpu);
-
-       while ((cpu != dma) && budget) {
-               u32 next_cpu = desc->txd2;
-               int mac;
-
-               desc = mtk_tx_next_qdma(ring, desc);
-               if ((desc->txd3 & QDMA_TX_OWNER_CPU) == 0)
-                       break;
-
-               mac = (desc->txd4 >> TX_DMA_FPORT_SHIFT) &
-                      TX_DMA_FPORT_MASK;
-               mac--;
-
-               tx_buf = mtk_desc_to_tx_buf(ring, desc);
-               skb = tx_buf->skb;
-               if (!skb)
-                       break;
-
-               if (skb != (struct sk_buff *)DMA_DUMMY_DESC) {
-                       bytes[mac] += skb->len;
-                       done[mac]++;
-                       budget--;
-               }
-               mtk_txd_unmap(eth->dev, tx_buf);
-
-               ring->tx_last_free->txd2 = next_cpu;
-               ring->tx_last_free = desc;
-               atomic_inc(&ring->tx_free_count);
-
-               cpu = next_cpu;
-       }
-
-       mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
-
-       /* re-read the hw index to catch tx packets that completed meanwhile */
-       if (cpu != dma || cpu != mtk_r32(eth, MTK_QTX_DRX_PTR))
-               *tx_again = true;
-
-       for (i = 0; i < eth->soc->mac_count; i++) {
-               if (!done[i])
-                       continue;
-               netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
-               total += done[i];
-       }
-
-       return total;
-}
-
-static int mtk_poll_tx(struct mtk_eth *eth, int budget, u32 tx_intr,
-                      bool *tx_again)
-{
-       struct mtk_tx_ring *ring = &eth->tx_ring;
-       struct net_device *netdev = eth->netdev[0];
-       int done;
-
-       done = eth->tx_ring.tx_poll(eth, budget, tx_again);
-       if (!*tx_again)
-               mtk_irq_ack(eth, tx_intr);
-
-       if (!done)
-               return 0;
-
-       smp_mb();
-       if (unlikely(!netif_queue_stopped(netdev)))
-               return done;
-
-       if (atomic_read(&ring->tx_free_count) > ring->tx_thresh)
-               netif_wake_queue(netdev);
-
-       return done;
-}
-
-static void mtk_stats_update(struct mtk_eth *eth)
-{
-       int i;
-
-       for (i = 0; i < eth->soc->mac_count; i++) {
-               if (!eth->mac[i] || !eth->mac[i]->hw_stats)
-                       continue;
-               if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
-                       mtk_stats_update_mac(eth->mac[i]);
-                       spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
-               }
-       }
-}
-
-static int mtk_poll(struct napi_struct *napi, int budget)
-{
-       struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
-       u32 status, mtk_status, mask, tx_intr, rx_intr, status_intr;
-       int tx_done, rx_done;
-       bool tx_again = false;
-
-       status = mtk_irq_pending(eth);
-       mtk_status = mtk_irq_pending_status(eth);
-       tx_intr = eth->soc->tx_int;
-       rx_intr = eth->soc->rx_int;
-       status_intr = eth->soc->status_int;
-       tx_done = 0;
-       rx_done = 0;
-
-       if (status & tx_intr)
-               tx_done = mtk_poll_tx(eth, budget, tx_intr, &tx_again);
-
-       if (status & rx_intr)
-               rx_done = mtk_poll_rx(napi, budget, eth, rx_intr);
-
-       if (unlikely(mtk_status & status_intr)) {
-               mtk_stats_update(eth);
-               mtk_irq_ack_status(eth, status_intr);
-       }
-
-       if (unlikely(netif_msg_intr(eth))) {
-               mask = mtk_irq_enabled(eth);
-               netdev_info(eth->netdev[0],
-                           "done tx %d, rx %d, intr 0x%08x/0x%x\n",
-                           tx_done, rx_done, status, mask);
-       }
-
-       if (tx_again || rx_done == budget)
-               return budget;
-
-       status = mtk_irq_pending(eth);
-       if (status & (tx_intr | rx_intr))
-               return budget;
-
-       napi_complete(napi);
-       mtk_irq_enable(eth, tx_intr | rx_intr);
-
-       return rx_done;
-}
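-
-/* NAPI contract as implemented above: returning the full budget keeps the
- * poller scheduled (tx asked to run again or rx used up the budget), while
- * returning less than budget lets napi_complete() run and the tx/rx
- * interrupts be re-enabled.  The pending mask is sampled once more before
- * completing, to close the race with events that fired during the poll.
- */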
-
-static int mtk_pdma_tx_alloc(struct mtk_eth *eth)
-{
-       int i;
-       struct mtk_tx_ring *ring = &eth->tx_ring;
-
-       ring->tx_ring_size = eth->soc->dma_ring_size;
-       ring->tx_free_idx = 0;
-       ring->tx_next_idx = 0;
-       ring->tx_thresh = max((unsigned long)ring->tx_ring_size >> 2,
-                             MAX_SKB_FRAGS);
-
-       ring->tx_buf = kcalloc(ring->tx_ring_size, sizeof(*ring->tx_buf),
-                              GFP_KERNEL);
-       if (!ring->tx_buf)
-               goto no_tx_mem;
-
-       ring->tx_dma =
-               dma_alloc_coherent(eth->dev,
-                                  ring->tx_ring_size * sizeof(*ring->tx_dma),
-                                  &ring->tx_phys, GFP_ATOMIC | __GFP_ZERO);
-       if (!ring->tx_dma)
-               goto no_tx_mem;
-
-       for (i = 0; i < ring->tx_ring_size; i++) {
-               ring->tx_dma[i].txd2 = TX_DMA_DESP2_DEF;
-               ring->tx_dma[i].txd4 = eth->soc->txd4;
-       }
-
-       atomic_set(&ring->tx_free_count, mtk_pdma_empty_txd(ring));
-       ring->tx_map = mtk_pdma_tx_map;
-       ring->tx_poll = mtk_pdma_tx_poll;
-       ring->tx_clean = mtk_pdma_tx_clean;
-
-       /* make sure that all changes to the dma ring are flushed before we
-        * continue
-        */
-       wmb();
-
-       mtk_reg_w32(eth, ring->tx_phys, MTK_REG_TX_BASE_PTR0);
-       mtk_reg_w32(eth, ring->tx_ring_size, MTK_REG_TX_MAX_CNT0);
-       mtk_reg_w32(eth, 0, MTK_REG_TX_CTX_IDX0);
-       mtk_reg_w32(eth, MTK_PST_DTX_IDX0, MTK_REG_PDMA_RST_CFG);
-
-       return 0;
-
-no_tx_mem:
-       return -ENOMEM;
-}
-
-static int mtk_qdma_tx_alloc_tx(struct mtk_eth *eth)
-{
-       struct mtk_tx_ring *ring = &eth->tx_ring;
-       int i, sz = sizeof(*ring->tx_dma);
-
-       ring->tx_ring_size = eth->soc->dma_ring_size;
-       ring->tx_buf = kcalloc(ring->tx_ring_size, sizeof(*ring->tx_buf),
-                              GFP_KERNEL);
-       if (!ring->tx_buf)
-               goto no_tx_mem;
-
-       ring->tx_dma = dma_alloc_coherent(eth->dev, ring->tx_ring_size * sz,
-                                         &ring->tx_phys,
-                                         GFP_ATOMIC | __GFP_ZERO);
-       if (!ring->tx_dma)
-               goto no_tx_mem;
-
-       for (i = 0; i < ring->tx_ring_size; i++) {
-               int next = (i + 1) % ring->tx_ring_size;
-               u32 next_ptr = ring->tx_phys + next * sz;
-
-               ring->tx_dma[i].txd2 = next_ptr;
-               ring->tx_dma[i].txd3 = TX_DMA_DESP2_DEF;
-       }
-
-       atomic_set(&ring->tx_free_count, ring->tx_ring_size - 2);
-       ring->tx_next_free = &ring->tx_dma[0];
-       ring->tx_last_free = &ring->tx_dma[ring->tx_ring_size - 2];
-       ring->tx_thresh = max((unsigned long)ring->tx_ring_size >> 2,
-                             MAX_SKB_FRAGS);
-
-       ring->tx_map = mtk_qdma_tx_map;
-       ring->tx_poll = mtk_qdma_tx_poll;
-       ring->tx_clean = mtk_qdma_tx_clean;
-
-       /* make sure that all changes to the dma ring are flushed before we
-        * continue
-        */
-       wmb();
-
-       mtk_w32(eth, ring->tx_phys, MTK_QTX_CTX_PTR);
-       mtk_w32(eth, ring->tx_phys, MTK_QTX_DTX_PTR);
-       mtk_w32(eth,
-               ring->tx_phys + ((ring->tx_ring_size - 1) * sz),
-               MTK_QTX_CRX_PTR);
-       mtk_w32(eth,
-               ring->tx_phys + ((ring->tx_ring_size - 1) * sz),
-               MTK_QTX_DRX_PTR);
-
-       return 0;
-
-no_tx_mem:
-       return -ENOMEM;
-}
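-
-/* Unlike the index-based PDMA ring, the QDMA tx ring is chained: txd2 of
- * each descriptor holds the physical address of the next one, so the
- * hardware walks a linked list.  The loop above links the descriptors into
- * a circle, the CRX/DRX pointers are parked one entry behind CTX/DTX, and
- * the free count starts at tx_ring_size - 2, keeping two entries in
- * reserve.
- */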
-
-static int mtk_qdma_init(struct mtk_eth *eth, int ring)
-{
-       int err;
-
-       err = mtk_init_fq_dma(eth);
-       if (err)
-               return err;
-
-       err = mtk_qdma_tx_alloc_tx(eth);
-       if (err)
-               return err;
-
-       err = mtk_dma_rx_alloc(eth, &eth->rx_ring[ring]);
-       if (err)
-               return err;
-
-       mtk_w32(eth, eth->rx_ring[ring].rx_phys, MTK_QRX_BASE_PTR0);
-       mtk_w32(eth, eth->rx_ring[ring].rx_ring_size, MTK_QRX_MAX_CNT0);
-       mtk_w32(eth, eth->rx_ring[ring].rx_calc_idx, MTK_QRX_CRX_IDX0);
-       mtk_w32(eth, MTK_PST_DRX_IDX0, MTK_QDMA_RST_IDX);
-       mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0));
-
-       /* Enable random early drop and set drop threshold automatically */
-       mtk_w32(eth, 0x174444, MTK_QDMA_FC_THRES);
-       mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
-
-       return 0;
-}
-
-static int mtk_pdma_qdma_init(struct mtk_eth *eth)
-{
-       int err = mtk_qdma_init(eth, 1);
-
-       if (err)
-               return err;
-
-       err = mtk_dma_rx_alloc(eth, &eth->rx_ring[0]);
-       if (err)
-               return err;
-
-       mtk_reg_w32(eth, eth->rx_ring[0].rx_phys, MTK_REG_RX_BASE_PTR0);
-       mtk_reg_w32(eth, eth->rx_ring[0].rx_ring_size, MTK_REG_RX_MAX_CNT0);
-       mtk_reg_w32(eth, eth->rx_ring[0].rx_calc_idx, MTK_REG_RX_CALC_IDX0);
-       mtk_reg_w32(eth, MTK_PST_DRX_IDX0, MTK_REG_PDMA_RST_CFG);
-
-       return 0;
-}
-
-static int mtk_pdma_init(struct mtk_eth *eth)
-{
-       struct mtk_rx_ring *ring = &eth->rx_ring[0];
-       int err;
-
-       err = mtk_pdma_tx_alloc(eth);
-       if (err)
-               return err;
-
-       err = mtk_dma_rx_alloc(eth, ring);
-       if (err)
-               return err;
-
-       mtk_reg_w32(eth, ring->rx_phys, MTK_REG_RX_BASE_PTR0);
-       mtk_reg_w32(eth, ring->rx_ring_size, MTK_REG_RX_MAX_CNT0);
-       mtk_reg_w32(eth, ring->rx_calc_idx, MTK_REG_RX_CALC_IDX0);
-       mtk_reg_w32(eth, MTK_PST_DRX_IDX0, MTK_REG_PDMA_RST_CFG);
-
-       return 0;
-}
-
-static void mtk_dma_free(struct mtk_eth *eth)
-{
-       int i;
-
-       for (i = 0; i < eth->soc->mac_count; i++)
-               if (eth->netdev[i])
-                       netdev_reset_queue(eth->netdev[i]);
-       eth->tx_ring.tx_clean(eth);
-       mtk_clean_rx(eth, &eth->rx_ring[0]);
-       mtk_clean_rx(eth, &eth->rx_ring[1]);
-       kfree(eth->scratch_head);
-}
-
-static void mtk_tx_timeout(struct net_device *dev)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       struct mtk_eth *eth = mac->hw;
-       struct mtk_tx_ring *ring = &eth->tx_ring;
-
-       eth->netdev[mac->id]->stats.tx_errors++;
-       netif_err(eth, tx_err, dev,
-                 "transmit timed out\n");
-       if (eth->soc->dma_type & MTK_PDMA) {
-               netif_info(eth, drv, dev, "pdma_cfg:%08x\n",
-                          mtk_reg_r32(eth, MTK_REG_PDMA_GLO_CFG));
-               netif_info(eth, drv, dev,
-                          "tx_ring=%d, base=%08x, max=%u, ctx=%u, dtx=%u, fdx=%hu, next=%hu\n",
-                          0, mtk_reg_r32(eth, MTK_REG_TX_BASE_PTR0),
-                          mtk_reg_r32(eth, MTK_REG_TX_MAX_CNT0),
-                          mtk_reg_r32(eth, MTK_REG_TX_CTX_IDX0),
-                          mtk_reg_r32(eth, MTK_REG_TX_DTX_IDX0),
-                          ring->tx_free_idx,
-                          ring->tx_next_idx);
-       }
-       if (eth->soc->dma_type & MTK_QDMA) {
-               netif_info(eth, drv, dev, "qdma_cfg:%08x\n",
-                          mtk_r32(eth, MTK_QDMA_GLO_CFG));
-               netif_info(eth, drv, dev,
-                          "tx_ring=%d, ctx=%08x, dtx=%08x, crx=%08x, drx=%08x, free=%hu\n",
-                          0, mtk_r32(eth, MTK_QTX_CTX_PTR),
-                          mtk_r32(eth, MTK_QTX_DTX_PTR),
-                          mtk_r32(eth, MTK_QTX_CRX_PTR),
-                          mtk_r32(eth, MTK_QTX_DRX_PTR),
-                          atomic_read(&ring->tx_free_count));
-       }
-       netif_info(eth, drv, dev,
-                  "rx_ring=%d, base=%08x, max=%u, calc=%u, drx=%u\n",
-                  0, mtk_reg_r32(eth, MTK_REG_RX_BASE_PTR0),
-                  mtk_reg_r32(eth, MTK_REG_RX_MAX_CNT0),
-                  mtk_reg_r32(eth, MTK_REG_RX_CALC_IDX0),
-                  mtk_reg_r32(eth, MTK_REG_RX_DRX_IDX0));
-
-       schedule_work(&mac->pending_work);
-}
-
-static irqreturn_t mtk_handle_irq(int irq, void *_eth)
-{
-       struct mtk_eth *eth = _eth;
-       u32 status, int_mask;
-
-       status = mtk_irq_pending(eth);
-       if (unlikely(!status))
-               return IRQ_NONE;
-
-       int_mask = (eth->soc->rx_int | eth->soc->tx_int);
-       if (likely(status & int_mask)) {
-               if (likely(napi_schedule_prep(&eth->rx_napi)))
-                       __napi_schedule(&eth->rx_napi);
-       } else {
-               mtk_irq_ack(eth, status);
-       }
-       mtk_irq_disable(eth, int_mask);
-
-       return IRQ_HANDLED;
-}
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void mtk_poll_controller(struct net_device *dev)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       struct mtk_eth *eth = mac->hw;
-       u32 int_mask = eth->soc->tx_int | eth->soc->rx_int;
-
-       mtk_irq_disable(eth, int_mask);
-       mtk_handle_irq(dev->irq, dev);
-       mtk_irq_enable(eth, int_mask);
-}
-#endif
-
-int mtk_set_clock_cycle(struct mtk_eth *eth)
-{
-       unsigned long sysclk = eth->sysclk;
-
-       sysclk /= MTK_US_CYC_CNT_DIVISOR;
-       sysclk <<= MTK_US_CYC_CNT_SHIFT;
-
-       mtk_w32(eth, (mtk_r32(eth, MTK_GLO_CFG) &
-                       ~(MTK_US_CYC_CNT_MASK << MTK_US_CYC_CNT_SHIFT)) |
-                       sysclk,
-                       MTK_GLO_CFG);
-       return 0;
-}
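-
-/* Example of the arithmetic above: with a 250 MHz system clock,
- * sysclk / MTK_US_CYC_CNT_DIVISOR = 250 cycles per microsecond, which is
- * then shifted into the 8-bit US_CYC_CNT field (bits 8..15) of MTK_GLO_CFG,
- * presumably so the hardware can time intervals in microseconds.
- */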
-
-void mtk_fwd_config(struct mtk_eth *eth)
-{
-       u32 fwd_cfg;
-
-       fwd_cfg = mtk_r32(eth, MTK_GDMA1_FWD_CFG);
-
-       /* disable jumbo frame */
-       if (eth->soc->jumbo_frame)
-               fwd_cfg &= ~MTK_GDM1_JMB_EN;
-
-       /* forward unicast/multicast/broadcast frames to the cpu */
-       fwd_cfg &= ~0xffff;
-
-       mtk_w32(eth, fwd_cfg, MTK_GDMA1_FWD_CFG);
-}
-
-void mtk_csum_config(struct mtk_eth *eth)
-{
-       if (eth->soc->hw_features & NETIF_F_RXCSUM)
-               mtk_w32(eth, mtk_r32(eth, MTK_GDMA1_FWD_CFG) |
-                       (MTK_GDM1_ICS_EN | MTK_GDM1_TCS_EN | MTK_GDM1_UCS_EN),
-                       MTK_GDMA1_FWD_CFG);
-       else
-               mtk_w32(eth, mtk_r32(eth, MTK_GDMA1_FWD_CFG) &
-                       ~(MTK_GDM1_ICS_EN | MTK_GDM1_TCS_EN | MTK_GDM1_UCS_EN),
-                       MTK_GDMA1_FWD_CFG);
-       if (eth->soc->hw_features & NETIF_F_IP_CSUM)
-               mtk_w32(eth, mtk_r32(eth, MTK_CDMA_CSG_CFG) |
-                       (MTK_ICS_GEN_EN | MTK_TCS_GEN_EN | MTK_UCS_GEN_EN),
-                       MTK_CDMA_CSG_CFG);
-       else
-               mtk_w32(eth, mtk_r32(eth, MTK_CDMA_CSG_CFG) &
-                       ~(MTK_ICS_GEN_EN | MTK_TCS_GEN_EN | MTK_UCS_GEN_EN),
-                       MTK_CDMA_CSG_CFG);
-}
-
-static int mtk_start_dma(struct mtk_eth *eth)
-{
-       unsigned long flags;
-       u32 val;
-       int err;
-
-       if (eth->soc->dma_type == MTK_PDMA)
-               err = mtk_pdma_init(eth);
-       else if (eth->soc->dma_type == MTK_QDMA)
-               err = mtk_qdma_init(eth, 0);
-       else
-               err = mtk_pdma_qdma_init(eth);
-       if (err) {
-               mtk_dma_free(eth);
-               return err;
-       }
-
-       spin_lock_irqsave(&eth->page_lock, flags);
-
-       val = MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN;
-       if (eth->soc->rx_2b_offset)
-               val |= MTK_RX_2B_OFFSET;
-       val |= eth->soc->pdma_glo_cfg;
-
-       if (eth->soc->dma_type & MTK_PDMA)
-               mtk_reg_w32(eth, val, MTK_REG_PDMA_GLO_CFG);
-
-       if (eth->soc->dma_type & MTK_QDMA)
-               mtk_w32(eth, val, MTK_QDMA_GLO_CFG);
-
-       spin_unlock_irqrestore(&eth->page_lock, flags);
-
-       return 0;
-}
-
-static int mtk_open(struct net_device *dev)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       struct mtk_eth *eth = mac->hw;
-
-       dma_coerce_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
-
-       if (!atomic_read(&eth->dma_refcnt)) {
-               int err = mtk_start_dma(eth);
-
-               if (err)
-                       return err;
-
-               napi_enable(&eth->rx_napi);
-               mtk_irq_enable(eth, eth->soc->tx_int | eth->soc->rx_int);
-       }
-       atomic_inc(&eth->dma_refcnt);
-
-       if (eth->phy)
-               eth->phy->start(mac);
-
-       if (eth->soc->has_carrier && eth->soc->has_carrier(eth))
-               netif_carrier_on(dev);
-
-       netif_start_queue(dev);
-       eth->soc->fwd_config(eth);
-
-       return 0;
-}
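-
-/* dma_refcnt counts the netdevs sharing the single set of dma rings: only
- * the first open starts the engines and enables napi and the irqs, and
- * mtk_stop() below tears everything down only once the last user is gone.
- */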
-
-static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
-{
-       unsigned long flags;
-       u32 val;
-       int i;
-
-       /* stop the dma engine */
-       spin_lock_irqsave(&eth->page_lock, flags);
-       val = mtk_r32(eth, glo_cfg);
-       mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
-               glo_cfg);
-       spin_unlock_irqrestore(&eth->page_lock, flags);
-
-       /* wait for dma stop */
-       for (i = 0; i < 10; i++) {
-               val = mtk_r32(eth, glo_cfg);
-               if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
-                       msleep(20);
-                       continue;
-               }
-               break;
-       }
-}
-
-static int mtk_stop(struct net_device *dev)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       struct mtk_eth *eth = mac->hw;
-
-       netif_tx_disable(dev);
-       if (eth->phy)
-               eth->phy->stop(mac);
-
-       if (!atomic_dec_and_test(&eth->dma_refcnt))
-               return 0;
-
-       mtk_irq_disable(eth, eth->soc->tx_int | eth->soc->rx_int);
-       napi_disable(&eth->rx_napi);
-
-       if (eth->soc->dma_type & MTK_PDMA)
-               mtk_stop_dma(eth, mtk_reg_table[MTK_REG_PDMA_GLO_CFG]);
-
-       if (eth->soc->dma_type & MTK_QDMA)
-               mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
-
-       mtk_dma_free(eth);
-
-       return 0;
-}
-
-static int __init mtk_init_hw(struct mtk_eth *eth)
-{
-       int i, err;
-
-       eth->soc->reset_fe(eth);
-
-       if (eth->soc->switch_init)
-               if (eth->soc->switch_init(eth)) {
-                       dev_err(eth->dev, "failed to initialize switch core\n");
-                       return -ENODEV;
-               }
-
-       err = devm_request_irq(eth->dev, eth->irq, mtk_handle_irq, 0,
-                              dev_name(eth->dev), eth);
-       if (err)
-               return err;
-
-       err = mtk_mdio_init(eth);
-       if (err)
-               return err;
-
-       /* disable delay and normal interrupt */
-       mtk_reg_w32(eth, 0, MTK_REG_DLY_INT_CFG);
-       if (eth->soc->dma_type & MTK_QDMA)
-               mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
-       mtk_irq_disable(eth, eth->soc->tx_int | eth->soc->rx_int);
-
-       /* the frame engine pushes a VLAN tag according to the VIDX field in the Tx desc */
-       if (mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE])
-               for (i = 0; i < 16; i += 2)
-                       mtk_w32(eth, ((i + 1) << 16) + i,
-                               mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE] +
-                               (i * 2));
-
-       if (eth->soc->fwd_config(eth))
-               dev_err(eth->dev, "unable to get clock\n");
-
-       if (mtk_reg_table[MTK_REG_MTK_RST_GL]) {
-               mtk_reg_w32(eth, 1, MTK_REG_MTK_RST_GL);
-               mtk_reg_w32(eth, 0, MTK_REG_MTK_RST_GL);
-       }
-
-       return 0;
-}
-
-static int __init mtk_init(struct net_device *dev)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       struct mtk_eth *eth = mac->hw;
-       struct device_node *port;
-       const char *mac_addr;
-       int err;
-
-       mac_addr = of_get_mac_address(mac->of_node);
-       if (mac_addr)
-               ether_addr_copy(dev->dev_addr, mac_addr);
-
-       /* If the mac address is invalid, use a random mac address */
-       if (!is_valid_ether_addr(dev->dev_addr)) {
-               eth_hw_addr_random(dev);
-               dev_err(eth->dev, "generated random MAC address %pM\n",
-                       dev->dev_addr);
-       }
-       mac->hw->soc->set_mac(mac, dev->dev_addr);
-
-       if (eth->soc->port_init)
-               for_each_child_of_node(mac->of_node, port)
-                       if (of_device_is_compatible(port,
-                                                   "mediatek,eth-port") &&
-                           of_device_is_available(port))
-                               eth->soc->port_init(eth, mac, port);
-
-       if (eth->phy) {
-               err = eth->phy->connect(mac);
-               if (err)
-                       return err;
-       }
-
-       return 0;
-}
-
-static void mtk_uninit(struct net_device *dev)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       struct mtk_eth *eth = mac->hw;
-
-       if (eth->phy)
-               eth->phy->disconnect(mac);
-       mtk_mdio_cleanup(eth);
-
-       mtk_irq_disable(eth, ~0);
-       free_irq(dev->irq, dev);
-}
-
-static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-
-       if (!mac->phy_dev)
-               return -ENODEV;
-
-       switch (cmd) {
-       case SIOCGMIIPHY:
-       case SIOCGMIIREG:
-       case SIOCSMIIREG:
-               return phy_mii_ioctl(mac->phy_dev, ifr, cmd);
-       default:
-               break;
-       }
-
-       return -EOPNOTSUPP;
-}
-
-static int mtk_change_mtu(struct net_device *dev, int new_mtu)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       struct mtk_eth *eth = mac->hw;
-       int frag_size, old_mtu;
-       u32 fwd_cfg;
-
-       if (!eth->soc->jumbo_frame)
-               return eth_change_mtu(dev, new_mtu);
-
-       frag_size = mtk_max_frag_size(new_mtu);
-       if (new_mtu < 68 || frag_size > PAGE_SIZE)
-               return -EINVAL;
-
-       old_mtu = dev->mtu;
-       dev->mtu = new_mtu;
-
-       /* return early if the buffer sizes will not change */
-       if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
-               return 0;
-       if (old_mtu > ETH_DATA_LEN && new_mtu > ETH_DATA_LEN)
-               return 0;
-
-       if (new_mtu <= ETH_DATA_LEN)
-               eth->rx_ring[0].frag_size = mtk_max_frag_size(ETH_DATA_LEN);
-       else
-               eth->rx_ring[0].frag_size = PAGE_SIZE;
-       eth->rx_ring[0].rx_buf_size =
-                               mtk_max_buf_size(eth->rx_ring[0].frag_size);
-
-       if (!netif_running(dev))
-               return 0;
-
-       mtk_stop(dev);
-       fwd_cfg = mtk_r32(eth, MTK_GDMA1_FWD_CFG);
-       if (new_mtu <= ETH_DATA_LEN) {
-               fwd_cfg &= ~MTK_GDM1_JMB_EN;
-       } else {
-               fwd_cfg &= ~(MTK_GDM1_JMB_LEN_MASK << MTK_GDM1_JMB_LEN_SHIFT);
-               fwd_cfg |= (DIV_ROUND_UP(frag_size, 1024) <<
-                               MTK_GDM1_JMB_LEN_SHIFT) | MTK_GDM1_JMB_EN;
-       }
-       mtk_w32(eth, fwd_cfg, MTK_GDMA1_FWD_CFG);
-
-       return mtk_open(dev);
-}
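-
-/* The jumbo length field above is programmed in 1 kB units:
- * DIV_ROUND_UP(frag_size, 1024) lands in the 4-bit MTK_GDM1_JMB_LEN field
- * (bits 28..31), so e.g. a 3 kB rx buffer is encoded as the value 3.
- */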
-
-static void mtk_pending_work(struct work_struct *work)
-{
-       struct mtk_mac *mac = container_of(work, struct mtk_mac, pending_work);
-       struct mtk_eth *eth = mac->hw;
-       struct net_device *dev = eth->netdev[mac->id];
-       int err;
-
-       rtnl_lock();
-       mtk_stop(dev);
-
-       err = mtk_open(dev);
-       if (err) {
-               netif_alert(eth, ifup, dev,
-                           "Driver up/down cycle failed, closing device.\n");
-               dev_close(dev);
-       }
-       rtnl_unlock();
-}
-
-static int mtk_cleanup(struct mtk_eth *eth)
-{
-       int i;
-
-       for (i = 0; i < eth->soc->mac_count; i++) {
-               struct mtk_mac *mac;
-
-               if (!eth->netdev[i])
-                       continue;
-
-               mac = netdev_priv(eth->netdev[i]);
-               unregister_netdev(eth->netdev[i]);
-               cancel_work_sync(&mac->pending_work);
-               free_netdev(eth->netdev[i]);
-       }
-
-       return 0;
-}
-
-static const struct net_device_ops mtk_netdev_ops = {
-       .ndo_init               = mtk_init,
-       .ndo_uninit             = mtk_uninit,
-       .ndo_open               = mtk_open,
-       .ndo_stop               = mtk_stop,
-       .ndo_start_xmit         = mtk_start_xmit,
-       .ndo_set_mac_address    = mtk_set_mac_address,
-       .ndo_validate_addr      = eth_validate_addr,
-       .ndo_do_ioctl           = mtk_do_ioctl,
-       .ndo_change_mtu         = mtk_change_mtu,
-       .ndo_tx_timeout         = mtk_tx_timeout,
-       .ndo_get_stats64        = mtk_get_stats64,
-       .ndo_vlan_rx_add_vid    = mtk_vlan_rx_add_vid,
-       .ndo_vlan_rx_kill_vid   = mtk_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = mtk_poll_controller,
-#endif
-};
-
-static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
-{
-       struct mtk_mac *mac;
-       const __be32 *_id = of_get_property(np, "reg", NULL);
-       int id, err;
-
-       if (!_id) {
-               dev_err(eth->dev, "missing mac id\n");
-               return -EINVAL;
-       }
-       id = be32_to_cpup(_id);
-       if (id >= eth->soc->mac_count || eth->netdev[id]) {
-               dev_err(eth->dev, "%d is not a valid mac id\n", id);
-               return -EINVAL;
-       }
-
-       eth->netdev[id] = alloc_etherdev(sizeof(*mac));
-       if (!eth->netdev[id]) {
-               dev_err(eth->dev, "alloc_etherdev failed\n");
-               return -ENOMEM;
-       }
-       mac = netdev_priv(eth->netdev[id]);
-       eth->mac[id] = mac;
-       mac->id = id;
-       mac->hw = eth;
-       mac->of_node = np;
-       INIT_WORK(&mac->pending_work, mtk_pending_work);
-
-       if (mtk_reg_table[MTK_REG_MTK_COUNTER_BASE]) {
-               mac->hw_stats = devm_kzalloc(eth->dev,
-                                            sizeof(*mac->hw_stats),
-                                            GFP_KERNEL);
-               if (!mac->hw_stats) {
-                       err = -ENOMEM;
-                       goto free_netdev;
-               }
-               spin_lock_init(&mac->hw_stats->stats_lock);
-               mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
-       }
-
-       SET_NETDEV_DEV(eth->netdev[id], eth->dev);
-       eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
-       eth->netdev[id]->base_addr = (unsigned long)eth->base;
-
-       if (eth->soc->init_data)
-               eth->soc->init_data(eth->soc, eth->netdev[id]);
-
-       eth->netdev[id]->vlan_features = eth->soc->hw_features &
-               ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
-       eth->netdev[id]->features |= eth->soc->hw_features;
-
-       if (mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE])
-               eth->netdev[id]->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
-
-       mtk_set_ethtool_ops(eth->netdev[id]);
-
-       err = register_netdev(eth->netdev[id]);
-       if (err) {
-               dev_err(eth->dev, "error bringing up device\n");
-               err = -ENOMEM;
-               goto free_netdev;
-       }
-       eth->netdev[id]->irq = eth->irq;
-       netif_info(eth, probe, eth->netdev[id],
-                  "mediatek frame engine at 0x%08lx, irq %d\n",
-                  eth->netdev[id]->base_addr, eth->netdev[id]->irq);
-
-       return 0;
-
-free_netdev:
-       free_netdev(eth->netdev[id]);
-       return err;
-}
-
-static int mtk_probe(struct platform_device *pdev)
-{
-       struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       const struct of_device_id *match;
-       struct device_node *mac_np;
-       struct mtk_soc_data *soc;
-       struct mtk_eth *eth;
-       struct clk *sysclk;
-       int err;
-
-       device_reset(&pdev->dev);
-
-       match = of_match_device(of_mtk_match, &pdev->dev);
-       soc = (struct mtk_soc_data *)match->data;
-
-       if (soc->reg_table)
-               mtk_reg_table = soc->reg_table;
-
-       eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
-       if (!eth)
-               return -ENOMEM;
-
-       eth->base = devm_ioremap_resource(&pdev->dev, res);
-       if (IS_ERR(eth->base))
-               return PTR_ERR(eth->base);
-
-       spin_lock_init(&eth->page_lock);
-
-       eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
-                                                     "mediatek,ethsys");
-       if (IS_ERR(eth->ethsys))
-               return PTR_ERR(eth->ethsys);
-
-       eth->irq = platform_get_irq(pdev, 0);
-       if (eth->irq < 0) {
-               dev_err(&pdev->dev, "no IRQ resource found\n");
-               return -ENXIO;
-       }
-
-       sysclk = devm_clk_get(&pdev->dev, NULL);
-       if (IS_ERR(sysclk)) {
-               dev_err(&pdev->dev,
-                       "the clock is not defined in the devicetree\n");
-               return -ENXIO;
-       }
-       eth->sysclk = clk_get_rate(sysclk);
-
-       eth->switch_np = of_parse_phandle(pdev->dev.of_node,
-                                         "mediatek,switch", 0);
-       if (soc->has_switch && !eth->switch_np) {
-               dev_err(&pdev->dev, "failed to read switch phandle\n");
-               return -ENODEV;
-       }
-
-       eth->dev = &pdev->dev;
-       eth->soc = soc;
-       eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
-
-       err = mtk_init_hw(eth);
-       if (err)
-               return err;
-
-       if (eth->soc->mac_count > 1) {
-               for_each_child_of_node(pdev->dev.of_node, mac_np) {
-                       if (!of_device_is_compatible(mac_np,
-                                                    "mediatek,eth-mac"))
-                               continue;
-
-                       if (!of_device_is_available(mac_np))
-                               continue;
-
-                       err = mtk_add_mac(eth, mac_np);
-                       if (err)
-                               goto err_free_dev;
-               }
-
-               init_dummy_netdev(&eth->dummy_dev);
-               netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_poll,
-                              soc->napi_weight);
-       } else {
-               err = mtk_add_mac(eth, pdev->dev.of_node);
-               if (err)
-                       goto err_free_dev;
-               netif_napi_add(eth->netdev[0], &eth->rx_napi, mtk_poll,
-                              soc->napi_weight);
-       }
-
-       platform_set_drvdata(pdev, eth);
-
-       return 0;
-
-err_free_dev:
-       mtk_cleanup(eth);
-       return err;
-}
-
-static int mtk_remove(struct platform_device *pdev)
-{
-       struct mtk_eth *eth = platform_get_drvdata(pdev);
-
-       netif_napi_del(&eth->rx_napi);
-       mtk_cleanup(eth);
-       platform_set_drvdata(pdev, NULL);
-
-       return 0;
-}
-
-static struct platform_driver mtk_driver = {
-       .probe = mtk_probe,
-       .remove = mtk_remove,
-       .driver = {
-               .name = "mtk_soc_eth",
-               .of_match_table = of_mtk_match,
-       },
-};
-
-module_platform_driver(mtk_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
-MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");
diff --git a/drivers/staging/mt7621-eth/mtk_eth_soc.h b/drivers/staging/mt7621-eth/mtk_eth_soc.h
deleted file mode 100644 (file)
index e6ed804..0000000
+++ /dev/null
@@ -1,716 +0,0 @@
-/*   This program is free software; you can redistribute it and/or modify
- *   it under the terms of the GNU General Public License as published by
- *   the Free Software Foundation; version 2 of the License
- *
- *   This program is distributed in the hope that it will be useful,
- *   but WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *   GNU General Public License for more details.
- *
- *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#ifndef MTK_ETH_H
-#define MTK_ETH_H
-
-#include <linux/mii.h>
-#include <linux/interrupt.h>
-#include <linux/netdevice.h>
-#include <linux/dma-mapping.h>
-#include <linux/phy.h>
-#include <linux/ethtool.h>
-#include <linux/version.h>
-#include <linux/atomic.h>
-
-/* these registers have different offsets depending on the SoC, so we use a
- * lookup table for them
- */
-enum mtk_reg {
-       MTK_REG_PDMA_GLO_CFG = 0,
-       MTK_REG_PDMA_RST_CFG,
-       MTK_REG_DLY_INT_CFG,
-       MTK_REG_TX_BASE_PTR0,
-       MTK_REG_TX_MAX_CNT0,
-       MTK_REG_TX_CTX_IDX0,
-       MTK_REG_TX_DTX_IDX0,
-       MTK_REG_RX_BASE_PTR0,
-       MTK_REG_RX_MAX_CNT0,
-       MTK_REG_RX_CALC_IDX0,
-       MTK_REG_RX_DRX_IDX0,
-       MTK_REG_MTK_INT_ENABLE,
-       MTK_REG_MTK_INT_STATUS,
-       MTK_REG_MTK_DMA_VID_BASE,
-       MTK_REG_MTK_COUNTER_BASE,
-       MTK_REG_MTK_RST_GL,
-       MTK_REG_MTK_INT_STATUS2,
-       MTK_REG_COUNT
-};
-
-/* delayed interrupt bits */
-#define MTK_DELAY_EN_INT       0x80
-#define MTK_DELAY_MAX_INT      0x04
-#define MTK_DELAY_MAX_TOUT     0x04
-#define MTK_DELAY_TIME         20
-#define MTK_DELAY_CHAN         (((MTK_DELAY_EN_INT | MTK_DELAY_MAX_INT) << 8) \
-                                | MTK_DELAY_MAX_TOUT)
-#define MTK_DELAY_INIT         ((MTK_DELAY_CHAN << 16) | MTK_DELAY_CHAN)
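-
-/* The init value expands as follows:
- *   MTK_DELAY_CHAN = ((0x80 | 0x04) << 8) | 0x04 = 0x8404
- *   MTK_DELAY_INIT = (0x8404 << 16) | 0x8404 = 0x84048404
- * i.e. the same enable / max-interrupt / max-timeout channel setting is
- * applied to both halves of the delay interrupt register.
- */
-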
-#define MTK_PSE_FQFC_CFG_INIT  0x80504000
-#define MTK_PSE_FQFC_CFG_256Q  0xff908000
-
-/* interrupt bits */
-#define MTK_CNT_PPE_AF         BIT(31)
-#define MTK_CNT_GDM_AF         BIT(29)
-#define MTK_PSE_P2_FC          BIT(26)
-#define MTK_PSE_BUF_DROP       BIT(24)
-#define MTK_GDM_OTHER_DROP     BIT(23)
-#define MTK_PSE_P1_FC          BIT(22)
-#define MTK_PSE_P0_FC          BIT(21)
-#define MTK_PSE_FQ_EMPTY       BIT(20)
-#define MTK_GE1_STA_CHG                BIT(18)
-#define MTK_TX_COHERENT                BIT(17)
-#define MTK_RX_COHERENT                BIT(16)
-#define MTK_TX_DONE_INT3       BIT(11)
-#define MTK_TX_DONE_INT2       BIT(10)
-#define MTK_TX_DONE_INT1       BIT(9)
-#define MTK_TX_DONE_INT0       BIT(8)
-#define MTK_RX_DONE_INT0       BIT(2)
-#define MTK_TX_DLY_INT         BIT(1)
-#define MTK_RX_DLY_INT         BIT(0)
-
-#define MTK_RX_DONE_INT                MTK_RX_DONE_INT0
-#define MTK_TX_DONE_INT                (MTK_TX_DONE_INT0 | MTK_TX_DONE_INT1 | \
-                                MTK_TX_DONE_INT2 | MTK_TX_DONE_INT3)
-
-#define RT5350_RX_DLY_INT      BIT(30)
-#define RT5350_TX_DLY_INT      BIT(28)
-#define RT5350_RX_DONE_INT1    BIT(17)
-#define RT5350_RX_DONE_INT0    BIT(16)
-#define RT5350_TX_DONE_INT3    BIT(3)
-#define RT5350_TX_DONE_INT2    BIT(2)
-#define RT5350_TX_DONE_INT1    BIT(1)
-#define RT5350_TX_DONE_INT0    BIT(0)
-
-#define RT5350_RX_DONE_INT     (RT5350_RX_DONE_INT0 | RT5350_RX_DONE_INT1)
-#define RT5350_TX_DONE_INT     (RT5350_TX_DONE_INT0 | RT5350_TX_DONE_INT1 | \
-                                RT5350_TX_DONE_INT2 | RT5350_TX_DONE_INT3)
-
-/* registers */
-#define MTK_GDMA_OFFSET                0x0020
-#define MTK_PSE_OFFSET         0x0040
-#define MTK_GDMA2_OFFSET       0x0060
-#define MTK_CDMA_OFFSET                0x0080
-#define MTK_DMA_VID0           0x00a8
-#define MTK_PDMA_OFFSET                0x0100
-#define MTK_PPE_OFFSET         0x0200
-#define MTK_CMTABLE_OFFSET     0x0400
-#define MTK_POLICYTABLE_OFFSET 0x1000
-
-#define MT7621_GDMA_OFFSET     0x0500
-#define MT7620_GDMA_OFFSET     0x0600
-
-#define RT5350_PDMA_OFFSET     0x0800
-#define RT5350_SDM_OFFSET      0x0c00
-
-#define MTK_MDIO_ACCESS                0x00
-#define MTK_MDIO_CFG           0x04
-#define MTK_GLO_CFG            0x08
-#define MTK_RST_GL             0x0C
-#define MTK_INT_STATUS         0x10
-#define MTK_INT_ENABLE         0x14
-#define MTK_MDIO_CFG2          0x18
-#define MTK_FOC_TS_T           0x1C
-
-#define        MTK_GDMA1_FWD_CFG       (MTK_GDMA_OFFSET + 0x00)
-#define MTK_GDMA1_SCH_CFG      (MTK_GDMA_OFFSET + 0x04)
-#define MTK_GDMA1_SHPR_CFG     (MTK_GDMA_OFFSET + 0x08)
-#define MTK_GDMA1_MAC_ADRL     (MTK_GDMA_OFFSET + 0x0C)
-#define MTK_GDMA1_MAC_ADRH     (MTK_GDMA_OFFSET + 0x10)
-
-#define        MTK_GDMA2_FWD_CFG       (MTK_GDMA2_OFFSET + 0x00)
-#define MTK_GDMA2_SCH_CFG      (MTK_GDMA2_OFFSET + 0x04)
-#define MTK_GDMA2_SHPR_CFG     (MTK_GDMA2_OFFSET + 0x08)
-#define MTK_GDMA2_MAC_ADRL     (MTK_GDMA2_OFFSET + 0x0C)
-#define MTK_GDMA2_MAC_ADRH     (MTK_GDMA2_OFFSET + 0x10)
-
-#define MTK_PSE_FQ_CFG         (MTK_PSE_OFFSET + 0x00)
-#define MTK_CDMA_FC_CFG                (MTK_PSE_OFFSET + 0x04)
-#define MTK_GDMA1_FC_CFG       (MTK_PSE_OFFSET + 0x08)
-#define MTK_GDMA2_FC_CFG       (MTK_PSE_OFFSET + 0x0C)
-
-#define MTK_CDMA_CSG_CFG       (MTK_CDMA_OFFSET + 0x00)
-#define MTK_CDMA_SCH_CFG       (MTK_CDMA_OFFSET + 0x04)
-
-#define        MT7621_GDMA_FWD_CFG(x)  (MT7621_GDMA_OFFSET + (x * 0x1000))
-
-/* FIXME this might be different for different SOCs */
-#define        MT7620_GDMA1_FWD_CFG    (MT7621_GDMA_OFFSET + 0x00)
-
-#define RT5350_TX_BASE_PTR0    (RT5350_PDMA_OFFSET + 0x00)
-#define RT5350_TX_MAX_CNT0     (RT5350_PDMA_OFFSET + 0x04)
-#define RT5350_TX_CTX_IDX0     (RT5350_PDMA_OFFSET + 0x08)
-#define RT5350_TX_DTX_IDX0     (RT5350_PDMA_OFFSET + 0x0C)
-#define RT5350_TX_BASE_PTR1    (RT5350_PDMA_OFFSET + 0x10)
-#define RT5350_TX_MAX_CNT1     (RT5350_PDMA_OFFSET + 0x14)
-#define RT5350_TX_CTX_IDX1     (RT5350_PDMA_OFFSET + 0x18)
-#define RT5350_TX_DTX_IDX1     (RT5350_PDMA_OFFSET + 0x1C)
-#define RT5350_TX_BASE_PTR2    (RT5350_PDMA_OFFSET + 0x20)
-#define RT5350_TX_MAX_CNT2     (RT5350_PDMA_OFFSET + 0x24)
-#define RT5350_TX_CTX_IDX2     (RT5350_PDMA_OFFSET + 0x28)
-#define RT5350_TX_DTX_IDX2     (RT5350_PDMA_OFFSET + 0x2C)
-#define RT5350_TX_BASE_PTR3    (RT5350_PDMA_OFFSET + 0x30)
-#define RT5350_TX_MAX_CNT3     (RT5350_PDMA_OFFSET + 0x34)
-#define RT5350_TX_CTX_IDX3     (RT5350_PDMA_OFFSET + 0x38)
-#define RT5350_TX_DTX_IDX3     (RT5350_PDMA_OFFSET + 0x3C)
-#define RT5350_RX_BASE_PTR0    (RT5350_PDMA_OFFSET + 0x100)
-#define RT5350_RX_MAX_CNT0     (RT5350_PDMA_OFFSET + 0x104)
-#define RT5350_RX_CALC_IDX0    (RT5350_PDMA_OFFSET + 0x108)
-#define RT5350_RX_DRX_IDX0     (RT5350_PDMA_OFFSET + 0x10C)
-#define RT5350_RX_BASE_PTR1    (RT5350_PDMA_OFFSET + 0x110)
-#define RT5350_RX_MAX_CNT1     (RT5350_PDMA_OFFSET + 0x114)
-#define RT5350_RX_CALC_IDX1    (RT5350_PDMA_OFFSET + 0x118)
-#define RT5350_RX_DRX_IDX1     (RT5350_PDMA_OFFSET + 0x11C)
-#define RT5350_PDMA_GLO_CFG    (RT5350_PDMA_OFFSET + 0x204)
-#define RT5350_PDMA_RST_CFG    (RT5350_PDMA_OFFSET + 0x208)
-#define RT5350_DLY_INT_CFG     (RT5350_PDMA_OFFSET + 0x20c)
-#define RT5350_MTK_INT_STATUS  (RT5350_PDMA_OFFSET + 0x220)
-#define RT5350_MTK_INT_ENABLE  (RT5350_PDMA_OFFSET + 0x228)
-#define RT5350_PDMA_SCH_CFG    (RT5350_PDMA_OFFSET + 0x280)
-
-#define MTK_PDMA_GLO_CFG       (MTK_PDMA_OFFSET + 0x00)
-#define MTK_PDMA_RST_CFG       (MTK_PDMA_OFFSET + 0x04)
-#define MTK_PDMA_SCH_CFG       (MTK_PDMA_OFFSET + 0x08)
-#define MTK_DLY_INT_CFG                (MTK_PDMA_OFFSET + 0x0C)
-#define MTK_TX_BASE_PTR0       (MTK_PDMA_OFFSET + 0x10)
-#define MTK_TX_MAX_CNT0                (MTK_PDMA_OFFSET + 0x14)
-#define MTK_TX_CTX_IDX0                (MTK_PDMA_OFFSET + 0x18)
-#define MTK_TX_DTX_IDX0                (MTK_PDMA_OFFSET + 0x1C)
-#define MTK_TX_BASE_PTR1       (MTK_PDMA_OFFSET + 0x20)
-#define MTK_TX_MAX_CNT1                (MTK_PDMA_OFFSET + 0x24)
-#define MTK_TX_CTX_IDX1                (MTK_PDMA_OFFSET + 0x28)
-#define MTK_TX_DTX_IDX1                (MTK_PDMA_OFFSET + 0x2C)
-#define MTK_RX_BASE_PTR0       (MTK_PDMA_OFFSET + 0x30)
-#define MTK_RX_MAX_CNT0                (MTK_PDMA_OFFSET + 0x34)
-#define MTK_RX_CALC_IDX0       (MTK_PDMA_OFFSET + 0x38)
-#define MTK_RX_DRX_IDX0                (MTK_PDMA_OFFSET + 0x3C)
-#define MTK_TX_BASE_PTR2       (MTK_PDMA_OFFSET + 0x40)
-#define MTK_TX_MAX_CNT2                (MTK_PDMA_OFFSET + 0x44)
-#define MTK_TX_CTX_IDX2                (MTK_PDMA_OFFSET + 0x48)
-#define MTK_TX_DTX_IDX2                (MTK_PDMA_OFFSET + 0x4C)
-#define MTK_TX_BASE_PTR3       (MTK_PDMA_OFFSET + 0x50)
-#define MTK_TX_MAX_CNT3                (MTK_PDMA_OFFSET + 0x54)
-#define MTK_TX_CTX_IDX3                (MTK_PDMA_OFFSET + 0x58)
-#define MTK_TX_DTX_IDX3                (MTK_PDMA_OFFSET + 0x5C)
-#define MTK_RX_BASE_PTR1       (MTK_PDMA_OFFSET + 0x60)
-#define MTK_RX_MAX_CNT1                (MTK_PDMA_OFFSET + 0x64)
-#define MTK_RX_CALC_IDX1       (MTK_PDMA_OFFSET + 0x68)
-#define MTK_RX_DRX_IDX1                (MTK_PDMA_OFFSET + 0x6C)
-
-/* Switch DMA configuration */
-#define RT5350_SDM_CFG         (RT5350_SDM_OFFSET + 0x00)
-#define RT5350_SDM_RRING       (RT5350_SDM_OFFSET + 0x04)
-#define RT5350_SDM_TRING       (RT5350_SDM_OFFSET + 0x08)
-#define RT5350_SDM_MAC_ADRL    (RT5350_SDM_OFFSET + 0x0C)
-#define RT5350_SDM_MAC_ADRH    (RT5350_SDM_OFFSET + 0x10)
-#define RT5350_SDM_TPCNT       (RT5350_SDM_OFFSET + 0x100)
-#define RT5350_SDM_TBCNT       (RT5350_SDM_OFFSET + 0x104)
-#define RT5350_SDM_RPCNT       (RT5350_SDM_OFFSET + 0x108)
-#define RT5350_SDM_RBCNT       (RT5350_SDM_OFFSET + 0x10C)
-#define RT5350_SDM_CS_ERR      (RT5350_SDM_OFFSET + 0x110)
-
-#define RT5350_SDM_ICS_EN      BIT(16)
-#define RT5350_SDM_TCS_EN      BIT(17)
-#define RT5350_SDM_UCS_EN      BIT(18)
-
-/* QDMA registers */
-#define MTK_QTX_CFG(x)         (0x1800 + (x * 0x10))
-#define MTK_QTX_SCH(x)         (0x1804 + (x * 0x10))
-#define MTK_QRX_BASE_PTR0      0x1900
-#define MTK_QRX_MAX_CNT0       0x1904
-#define MTK_QRX_CRX_IDX0       0x1908
-#define MTK_QRX_DRX_IDX0       0x190C
-#define MTK_QDMA_GLO_CFG       0x1A04
-#define MTK_QDMA_RST_IDX       0x1A08
-#define MTK_QDMA_DELAY_INT     0x1A0C
-#define MTK_QDMA_FC_THRES      0x1A10
-#define MTK_QMTK_INT_STATUS    0x1A18
-#define MTK_QMTK_INT_ENABLE    0x1A1C
-#define MTK_QDMA_HRED2         0x1A44
-
-#define MTK_QTX_CTX_PTR                0x1B00
-#define MTK_QTX_DTX_PTR                0x1B04
-
-#define MTK_QTX_CRX_PTR                0x1B10
-#define MTK_QTX_DRX_PTR                0x1B14
-
-#define MTK_QDMA_FQ_HEAD       0x1B20
-#define MTK_QDMA_FQ_TAIL       0x1B24
-#define MTK_QDMA_FQ_CNT                0x1B28
-#define MTK_QDMA_FQ_BLEN       0x1B2C
-
-#define QDMA_PAGE_SIZE         2048
-#define QDMA_TX_OWNER_CPU      BIT(31)
-#define QDMA_TX_SWC            BIT(14)
-#define TX_QDMA_SDL(_x)                (((_x) & 0x3fff) << 16)
-#define QDMA_RES_THRES         4
-
-/* MDIO_CFG register bits */
-#define MTK_MDIO_CFG_AUTO_POLL_EN      BIT(29)
-#define MTK_MDIO_CFG_GP1_BP_EN         BIT(16)
-#define MTK_MDIO_CFG_GP1_FRC_EN                BIT(15)
-#define MTK_MDIO_CFG_GP1_SPEED_10      (0 << 13)
-#define MTK_MDIO_CFG_GP1_SPEED_100     (1 << 13)
-#define MTK_MDIO_CFG_GP1_SPEED_1000    (2 << 13)
-#define MTK_MDIO_CFG_GP1_DUPLEX                BIT(12)
-#define MTK_MDIO_CFG_GP1_FC_TX         BIT(11)
-#define MTK_MDIO_CFG_GP1_FC_RX         BIT(10)
-#define MTK_MDIO_CFG_GP1_LNK_DWN       BIT(9)
-#define MTK_MDIO_CFG_GP1_AN_FAIL       BIT(8)
-#define MTK_MDIO_CFG_MDC_CLK_DIV_1     (0 << 6)
-#define MTK_MDIO_CFG_MDC_CLK_DIV_2     (1 << 6)
-#define MTK_MDIO_CFG_MDC_CLK_DIV_4     (2 << 6)
-#define MTK_MDIO_CFG_MDC_CLK_DIV_8     (3 << 6)
-#define MTK_MDIO_CFG_TURBO_MII_FREQ    BIT(5)
-#define MTK_MDIO_CFG_TURBO_MII_MODE    BIT(4)
-#define MTK_MDIO_CFG_RX_CLK_SKEW_0     (0 << 2)
-#define MTK_MDIO_CFG_RX_CLK_SKEW_200   (1 << 2)
-#define MTK_MDIO_CFG_RX_CLK_SKEW_400   (2 << 2)
-#define MTK_MDIO_CFG_RX_CLK_SKEW_INV   (3 << 2)
-#define MTK_MDIO_CFG_TX_CLK_SKEW_0     0
-#define MTK_MDIO_CFG_TX_CLK_SKEW_200   1
-#define MTK_MDIO_CFG_TX_CLK_SKEW_400   2
-#define MTK_MDIO_CFG_TX_CLK_SKEW_INV   3
-
-/* uni-cast port */
-#define MTK_GDM1_JMB_LEN_MASK  0xf
-#define MTK_GDM1_JMB_LEN_SHIFT 28
-#define MTK_GDM1_ICS_EN                BIT(22)
-#define MTK_GDM1_TCS_EN                BIT(21)
-#define MTK_GDM1_UCS_EN                BIT(20)
-#define MTK_GDM1_JMB_EN                BIT(19)
-#define MTK_GDM1_STRPCRC       BIT(16)
-#define MTK_GDM1_UFRC_P_CPU    (0 << 12)
-#define MTK_GDM1_UFRC_P_GDMA1  (1 << 12)
-#define MTK_GDM1_UFRC_P_PPE    (6 << 12)
-
-/* checksums */
-#define MTK_ICS_GEN_EN         BIT(2)
-#define MTK_UCS_GEN_EN         BIT(1)
-#define MTK_TCS_GEN_EN         BIT(0)
-
-/* dma mode */
-#define MTK_PDMA               BIT(0)
-#define MTK_QDMA               BIT(1)
-#define MTK_PDMA_RX_QDMA_TX    (MTK_PDMA | MTK_QDMA)
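-
-/* MTK_PDMA_RX_QDMA_TX is simply both bits or'ed together, which lets the
- * driver use equality tests (dma_type == MTK_PDMA) to select a pure engine
- * and bit tests (dma_type & MTK_QDMA) for paths shared with the mixed
- * rx-on-PDMA / tx-on-QDMA configuration, as mtk_start_dma() does.
- */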
-
-/* dma ring */
-#define MTK_PST_DRX_IDX0       BIT(16)
-#define MTK_PST_DTX_IDX3       BIT(3)
-#define MTK_PST_DTX_IDX2       BIT(2)
-#define MTK_PST_DTX_IDX1       BIT(1)
-#define MTK_PST_DTX_IDX0       BIT(0)
-
-#define MTK_RX_2B_OFFSET       BIT(31)
-#define MTK_TX_WB_DDONE                BIT(6)
-#define MTK_RX_DMA_BUSY                BIT(3)
-#define MTK_TX_DMA_BUSY                BIT(1)
-#define MTK_RX_DMA_EN          BIT(2)
-#define MTK_TX_DMA_EN          BIT(0)
-
-#define MTK_PDMA_SIZE_4DWORDS  (0 << 4)
-#define MTK_PDMA_SIZE_8DWORDS  (1 << 4)
-#define MTK_PDMA_SIZE_16DWORDS (2 << 4)
-
-#define MTK_US_CYC_CNT_MASK    0xff
-#define MTK_US_CYC_CNT_SHIFT   0x8
-#define MTK_US_CYC_CNT_DIVISOR 1000000
-
-/* PDMA descriptor rxd2 */
-#define RX_DMA_DONE            BIT(31)
-#define RX_DMA_LSO             BIT(30)
-#define RX_DMA_PLEN0(_x)       (((_x) & 0x3fff) << 16)
-#define RX_DMA_GET_PLEN0(_x)   (((_x) >> 16) & 0x3fff)
-#define RX_DMA_TAG             BIT(15)
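-
-/* Round-trip of the length field, for illustration: for any len <= 0x3fff,
- * RX_DMA_GET_PLEN0(RX_DMA_PLEN0(len)) == len, since the length lives in
- * bits 16..29 of rxd2 and is masked back out on the way in.
- */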
-
-/* PDMA descriptor rxd3 */
-#define RX_DMA_TPID(_x)                (((_x) >> 16) & 0xffff)
-#define RX_DMA_VID(_x)         ((_x) & 0xfff)
-
-/* PDMA descriptor rxd4 */
-#define RX_DMA_L4VALID         BIT(30)
-#define RX_DMA_FPORT_SHIFT     19
-#define RX_DMA_FPORT_MASK      0x7
-
-struct mtk_rx_dma {
-       unsigned int rxd1;
-       unsigned int rxd2;
-       unsigned int rxd3;
-       unsigned int rxd4;
-} __packed __aligned(4);
-
-/* PDMA tx descriptor bits */
-#define TX_DMA_BUF_LEN         0x3fff
-#define TX_DMA_PLEN0_MASK      (TX_DMA_BUF_LEN << 16)
-#define TX_DMA_PLEN0(_x)       (((_x) & TX_DMA_BUF_LEN) << 16)
-#define TX_DMA_PLEN1(_x)       ((_x) & TX_DMA_BUF_LEN)
-#define TX_DMA_GET_PLEN0(_x)    (((_x) >> 16) & TX_DMA_BUF_LEN)
-#define TX_DMA_GET_PLEN1(_x)    ((_x) & TX_DMA_BUF_LEN)
-#define TX_DMA_LS1             BIT(14)
-#define TX_DMA_LS0             BIT(30)
-#define TX_DMA_DONE            BIT(31)
-#define TX_DMA_FPORT_SHIFT     25
-#define TX_DMA_FPORT_MASK      0x7
-#define TX_DMA_INS_VLAN_MT7621 BIT(16)
-#define TX_DMA_INS_VLAN                BIT(7)
-#define TX_DMA_INS_PPPOE       BIT(12)
-#define TX_DMA_TAG             BIT(15)
-#define TX_DMA_TAG_MASK                BIT(15)
-#define TX_DMA_QN(_x)          ((_x) << 16)
-#define TX_DMA_PN(_x)          ((_x) << 24)
-#define TX_DMA_QN_MASK         TX_DMA_QN(0x7)
-#define TX_DMA_PN_MASK         TX_DMA_PN(0x7)
-#define TX_DMA_UDF             BIT(20)
-#define TX_DMA_CHKSUM          (0x7 << 29)
-#define TX_DMA_TSO             BIT(28)
-#define TX_DMA_DESP4_DEF       (TX_DMA_QN(3) | TX_DMA_PN(1))
-
-/* frame engine counters */
-#define MTK_PPE_AC_BCNT0       (MTK_CMTABLE_OFFSET + 0x00)
-#define MTK_GDMA1_TX_GBCNT     (MTK_CMTABLE_OFFSET + 0x300)
-#define MTK_GDMA2_TX_GBCNT     (MTK_GDMA1_TX_GBCNT + 0x40)
-
-/* phy device flags */
-#define MTK_PHY_FLAG_PORT      BIT(0)
-#define MTK_PHY_FLAG_ATTACH    BIT(1)
-
-struct mtk_tx_dma {
-       unsigned int txd1;
-       unsigned int txd2;
-       unsigned int txd3;
-       unsigned int txd4;
-} __packed __aligned(4);
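-
-/* Like struct mtk_rx_dma above, this is four 32-bit words; __packed and
- * __aligned(4) keep the compiler from adding padding or assuming stricter
- * alignment, so the structs can overlay the coherent dma ring memory
- * directly.
- */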
-
-struct mtk_eth;
-struct mtk_mac;
-
-/* manage the attached phys */
-struct mtk_phy {
-       spinlock_t              lock;
-
-       struct phy_device       *phy[8];
-       struct device_node      *phy_node[8];
-       const __be32            *phy_fixed[8];
-       int                     duplex[8];
-       int                     speed[8];
-       int                     tx_fc[8];
-       int                     rx_fc[8];
-       int (*connect)(struct mtk_mac *mac);
-       void (*disconnect)(struct mtk_mac *mac);
-       void (*start)(struct mtk_mac *mac);
-       void (*stop)(struct mtk_mac *mac);
-};
-
-/* struct mtk_soc_data - the structure that holds the SoC specific data
- * @reg_table:         Some of the legacy registers changed their location
- *                     over time. Their offsets are stored in this table
- *
- * @init_data:         Some features depend on the silicon revision. This
- *                     callback allows runtime modification of the content of
- *                     this struct
- * @reset_fe:          This callback is used to trigger the reset of the frame
- *                     engine
- * @set_mac:           This callback is used to set the unicast mac address
- *                     filter
- * @fwd_config:                This callback is used to setup the forward config
- *                     register of the MAC
- * @switch_init:       This callback is used to bring up the switch core
- * @port_init:         Some SoCs have ports that can be routed to a switch
- *                     port or an external PHY. This callback is used to set
- *                     up these ports.
- * @has_carrier:       This callback allows the driver to check whether a
- *                     cable is attached.
- * @mdio_init:         This callback is used to set up the MDIO bus if one is
- *                     present
- * @mdio_cleanup:      This callback is used to clean up the MDIO state.
- * @mdio_write:                This callback is used to write data to the MDIO bus.
- * @mdio_read:         This callback is used to read data from the MDIO bus.
- * @mdio_adjust_link:  This callback is used to apply the PHY settings.
- * @piac_offset:       the PIAC register has a different base offset
- * @hw_features:       feature set depends on the SoC type
- * @dma_ring_size:     allow GBit SoCs to set bigger rings than FE SoCs
- * @napi_weight:       allow GBit SoCs to set bigger napi weight than FE SoCs
- * @dma_type:          the SoC uses PDMA, QDMA or a mix of the two
- * @pdma_glo_cfg:      the default DMA configuration
- * @rx_int:            the RX interrupt bits used by the SoC
- * @tx_int:            the TX interrupt bits used by the SoC
- * @status_int:                the Status interrupt bits used by the SoC
- * @checksum_bit:      the bits used to turn on HW checksumming
- * @txd4:              default value of the TXD4 descriptor
- * @mac_count:         the number of MACs that the SoC has
- * @new_stats:         there is an old and a new way to read the hardware
- *                     stats registers
- * @jumbo_frame:       does the SoC support jumbo frames?
- * @rx_2b_offset:      tell the rx dma to offset the data by 2 bytes
- * @rx_sg_dma:         scatter gather support
- * @padding_64b:       enable 64 bit padding
- * @padding_bug:       rt2880 has a padding bug
- * @has_switch:                does the SoC have a built-in switch
- *
- * Although all of the supported SoCs share the same basic functionality, there
- * are several SoC specific functions and features that we need to support. This
- * struct holds the SoC specific data so that the common core can figure out
- * how to set up and use these differences.
- */
-struct mtk_soc_data {
-       const u16 *reg_table;
-
-       void (*init_data)(struct mtk_soc_data *data, struct net_device *netdev);
-       void (*reset_fe)(struct mtk_eth *eth);
-       void (*set_mac)(struct mtk_mac *mac, unsigned char *macaddr);
-       int (*fwd_config)(struct mtk_eth *eth);
-       int (*switch_init)(struct mtk_eth *eth);
-       void (*port_init)(struct mtk_eth *eth, struct mtk_mac *mac,
-                         struct device_node *port);
-       int (*has_carrier)(struct mtk_eth *eth);
-       int (*mdio_init)(struct mtk_eth *eth);
-       void (*mdio_cleanup)(struct mtk_eth *eth);
-       int (*mdio_write)(struct mii_bus *bus, int phy_addr, int phy_reg,
-                         u16 val);
-       int (*mdio_read)(struct mii_bus *bus, int phy_addr, int phy_reg);
-       void (*mdio_adjust_link)(struct mtk_eth *eth, int port);
-       u32 piac_offset;
-       netdev_features_t hw_features;
-       u32 dma_ring_size;
-       u32 napi_weight;
-       u32 dma_type;
-       u32 pdma_glo_cfg;
-       u32 rx_int;
-       u32 tx_int;
-       u32 status_int;
-       u32 checksum_bit;
-       u32 txd4;
-       u32 mac_count;
-
-       u32 new_stats:1;
-       u32 jumbo_frame:1;
-       u32 rx_2b_offset:1;
-       u32 rx_sg_dma:1;
-       u32 padding_64b:1;
-       u32 padding_bug:1;
-       u32 has_switch:1;
-};
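Taken together, the common core is expected to dispatch through these hooks during bring-up; a minimal sketch under that assumption (the wrapper name and exact call order are illustrative):

static int mtk_soc_bringup_sketch(struct mtk_eth *eth)
{
	struct mtk_soc_data *soc = eth->soc;
	int err;

	soc->reset_fe(eth);			/* reset the frame engine */

	err = soc->fwd_config(eth);		/* program the forward config */
	if (err)
		return err;

	if (soc->has_switch && soc->switch_init) {
		err = soc->switch_init(eth);	/* bring up the built-in switch */
		if (err)
			return err;
	}

	if (soc->mdio_init)			/* MDIO bus, when present */
		err = soc->mdio_init(eth);

	return err;
}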
-
-#define MTK_STAT_OFFSET                        0x40
-
-/* struct mtk_hw_stats - the structure that holds the traffic statistics.
- * @stats_lock:                make sure that stats operations are atomic
- * @reg_offset:                the stats register offset of the SoC
- * @syncp:             seqcount used to read the 64 bit counters consistently
- *
- * All of the supported SoCs have hardware counters for traffic statistics.
- * Whenever the status IRQ triggers we can read the latest stats from these
- * counters and store them in this struct.
- */
-struct mtk_hw_stats {
-       spinlock_t stats_lock;
-       u32 reg_offset;
-       struct u64_stats_sync syncp;
-
-       u64 tx_bytes;
-       u64 tx_packets;
-       u64 tx_skip;
-       u64 tx_collisions;
-       u64 rx_bytes;
-       u64 rx_packets;
-       u64 rx_overflow;
-       u64 rx_fcs_errors;
-       u64 rx_short_errors;
-       u64 rx_long_errors;
-       u64 rx_checksum_errors;
-       u64 rx_flow_control_packets;
-};
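Since @syncp is a standard struct u64_stats_sync, readers are expected to use the usual fetch/retry loop to get a consistent 64 bit snapshot on 32 bit SoCs. A minimal reader sketch (the function name is illustrative):

#include <linux/u64_stats_sync.h>

static void mtk_read_stats_sketch(struct mtk_hw_stats *hw_stats,
				  u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&hw_stats->syncp);
		*packets = hw_stats->tx_packets;
		*bytes = hw_stats->tx_bytes;
	} while (u64_stats_fetch_retry(&hw_stats->syncp, start));
}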
-
-/* PDMA descriptor can point at 1-2 segments. This enum allows us to track how
- * memory was allocated so that it can be freed properly
- */
-enum mtk_tx_flags {
-       MTK_TX_FLAGS_SINGLE0    = 0x01,
-       MTK_TX_FLAGS_PAGE0      = 0x02,
-       MTK_TX_FLAGS_PAGE1      = 0x04,
-};
-
-/* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at
- *                     by the TX descriptors
- * @skb:               The SKB pointer of the packet being sent
- * @flags:             mtk_tx_flags tracking how the buffer memory was mapped
- * @dma_addr0:         The base addr of the first segment
- * @dma_len0:          The length of the first segment
- * @dma_addr1:         The base addr of the second segment
- * @dma_len1:          The length of the second segment
- */
-struct mtk_tx_buf {
-       struct sk_buff *skb;
-       u32 flags;
-       DEFINE_DMA_UNMAP_ADDR(dma_addr0);
-       DEFINE_DMA_UNMAP_LEN(dma_len0);
-       DEFINE_DMA_UNMAP_ADDR(dma_addr1);
-       DEFINE_DMA_UNMAP_LEN(dma_len1);
-};
-
-/* struct mtk_tx_ring -        This struct holds info describing a TX ring
- * @tx_dma:            The descriptor ring
- * @tx_buf:            The memory pointed at by the ring
- * @tx_phys:           The physical addr of tx_dma
- * @tx_next_free:      Pointer to the next free descriptor
- * @tx_last_free:      Pointer to the last free descriptor
- * @tx_thresh:         The minimum number of free descriptors to keep available
- * @tx_map:            Callback to map a new packet into the ring
- * @tx_poll:           Callback for the housekeeping function
- * @tx_clean:          Callback for the cleanup function
- * @tx_ring_size:      How many descriptors are in the ring
- * @tx_free_idx:       The index of the next free descriptor
- * @tx_next_idx:       QDMA uses a linked list. This element points to the next
- *                     free descriptor in the list
- * @tx_free_count:     QDMA uses a linked list. Track how many free descriptors
- *                     are present
- */
-struct mtk_tx_ring {
-       struct mtk_tx_dma *tx_dma;
-       struct mtk_tx_buf *tx_buf;
-       dma_addr_t tx_phys;
-       struct mtk_tx_dma *tx_next_free;
-       struct mtk_tx_dma *tx_last_free;
-       u16 tx_thresh;
-       int (*tx_map)(struct sk_buff *skb, struct net_device *dev, int tx_num,
-                     struct mtk_tx_ring *ring, bool gso);
-       int (*tx_poll)(struct mtk_eth *eth, int budget, bool *tx_again);
-       void (*tx_clean)(struct mtk_eth *eth);
-
-       /* PDMA only */
-       u16 tx_ring_size;
-       u16 tx_free_idx;
-
-       /* QDMA only */
-       u16 tx_next_idx;
-       atomic_t tx_free_count;
-};

-
-/* struct mtk_rx_ring -        This struct holds info describing a RX ring
- * @rx_dma:            The descriptor ring
- * @rx_data:           The memory pointed at by the ring
- * @rx_phys:           The physical addr of rx_dma
- * @rx_ring_size:      How many descriptors are in the ring
- * @frag_size:         The size of the fragment allocated for each buffer
- * @rx_buf_size:       The size of each packet buffer
- * @rx_calc_idx:       The current head of ring
- */
-struct mtk_rx_ring {
-       struct mtk_rx_dma *rx_dma;
-       u8 **rx_data;
-       dma_addr_t rx_phys;
-       u16 rx_ring_size;
-       u16 frag_size;
-       u16 rx_buf_size;
-       u16 rx_calc_idx;
-};
-
-/* currently no SoC has more than 2 macs */
-#define MTK_MAX_DEVS                   2
-
-/* struct mtk_eth -   This is the main data structure for holding the state
- *                     of the driver
- * @dev:               The device pointer
- * @base:              The mapped register i/o base
- * @page_lock:         Make sure that register operations are atomic
- * @soc:               pointer to our SoC specific data
- * @dummy_dev:         we run 2 netdevs on 1 physical DMA ring and need a
- *                     dummy for NAPI to work
- * @netdev:            The netdev instances
- * @mac:               Each netdev is linked to a physical MAC
- * @switch_np:         The phandle for the switch
- * @irq:               The IRQ that we are using
- * @msg_enable:                Ethtool msg level
- * @sysclk:            The sysclk rate - needed for calibration
- * @ethsys:            The register map pointing at the range used to setup
- *                     MII modes
- * @dma_refcnt:                track how many netdevs are using the DMA engine
- * @tx_ring:           Pointer to the memory holding info about the TX ring
- * @rx_ring:           Pointer to the memory holding info about the RX ring
- * @rx_napi:           The NAPI struct
- * @scratch_ring:      Newer SoCs need memory for a second HW managed TX ring
- * @scratch_head:      The scratch memory that scratch_ring points to.
- * @phy:               Info about the attached PHYs
- * @mii_bus:           If there is a bus we need to create an instance for it
- * @link:              Track if the ports have a physical link
- * @sw_priv:           Pointer to the switch's private data
- * @vlan_map:          RX VID tracking
- */
-
-struct mtk_eth {
-       struct device                   *dev;
-       void __iomem                    *base;
-       spinlock_t                      page_lock;
-       struct mtk_soc_data             *soc;
-       struct net_device               dummy_dev;
-       struct net_device               *netdev[MTK_MAX_DEVS];
-       struct mtk_mac                  *mac[MTK_MAX_DEVS];
-       struct device_node              *switch_np;
-       int                             irq;
-       u32                             msg_enable;
-       unsigned long                   sysclk;
-       struct regmap                   *ethsys;
-       atomic_t                        dma_refcnt;
-       struct mtk_tx_ring              tx_ring;
-       struct mtk_rx_ring              rx_ring[2];
-       struct napi_struct              rx_napi;
-       struct mtk_tx_dma               *scratch_ring;
-       void                            *scratch_head;
-       struct mtk_phy                  *phy;
-       struct mii_bus                  *mii_bus;
-       int                             link[8];
-       void                            *sw_priv;
-       unsigned long                   vlan_map;
-};
-
-/* struct mtk_mac -    the structure that holds the info about the MACs of the
- *                     SoC
- * @id:                        The number of the MAC
- * @of_node:           Our devicetree node
- * @hw:                        Backpointer to our main data structure
- * @hw_stats:          Packet statistics counter
- * @phy_dev:           The attached PHY if available
- * @phy_flags:         The PHY's flags
- * @pending_work:      The work used to reset the dma ring
- */
-struct mtk_mac {
-       int                             id;
-       struct device_node              *of_node;
-       struct mtk_eth                  *hw;
-       struct mtk_hw_stats             *hw_stats;
-       struct phy_device               *phy_dev;
-       u32                             phy_flags;
-       struct work_struct              pending_work;
-};
-
-/* the structs describing the SoCs. These are declared in the soc_xyz.c files */
-extern const struct of_device_id of_mtk_match[];
-
-/* read the hardware stats counters */
-void mtk_stats_update_mac(struct mtk_mac *mac);
-
-/* frame engine reset helper */
-void mtk_reset(struct mtk_eth *eth, u32 reset_bits);
-
-/* register i/o wrappers */
-void mtk_w32(struct mtk_eth *eth, u32 val, unsigned int reg);
-u32 mtk_r32(struct mtk_eth *eth, unsigned int reg);
-
-/* default clock calibration handler */
-int mtk_set_clock_cycle(struct mtk_eth *eth);
-
-/* default checksum setup handler */
-void mtk_csum_config(struct mtk_eth *eth);
-
-/* default forward config handler */
-void mtk_fwd_config(struct mtk_eth *eth);
-
-#endif /* MTK_ETH_H */
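mtk_w32()/mtk_r32() are thin MMIO wrappers around the mapped register base; a sketch of the likely implementation, assuming the standard writel()/readl() accessors:

void mtk_w32(struct mtk_eth *eth, u32 val, unsigned int reg)
{
	writel(val, eth->base + reg);
}

u32 mtk_r32(struct mtk_eth *eth, unsigned int reg)
{
	return readl(eth->base + reg);
}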
diff --git a/drivers/staging/mt7621-eth/soc_mt7621.c b/drivers/staging/mt7621-eth/soc_mt7621.c
deleted file mode 100644 (file)
index 5d63b5d..0000000
+++ /dev/null
@@ -1,161 +0,0 @@
-/*   This program is free software; you can redistribute it and/or modify
- *   it under the terms of the GNU General Public License as published by
- *   the Free Software Foundation; version 2 of the License
- *
- *   This program is distributed in the hope that it will be useful,
- *   but WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *   GNU General Public License for more details.
- *
- *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/if_vlan.h>
-#include <linux/of_net.h>
-
-#include <asm/mach-ralink/ralink_regs.h>
-
-#include "mtk_eth_soc.h"
-#include "gsw_mt7620.h"
-#include "mdio.h"
-
-#define MT7620_CDMA_CSG_CFG    0x400
-#define MT7621_CDMP_IG_CTRL    (MT7620_CDMA_CSG_CFG + 0x00)
-#define MT7621_CDMP_EG_CTRL    (MT7620_CDMA_CSG_CFG + 0x04)
-#define MT7621_RESET_FE                BIT(6)
-#define MT7621_L4_VALID                BIT(24)
-
-#define MT7621_TX_DMA_UDF      BIT(19)
-
-#define CDMA_ICS_EN            BIT(2)
-#define CDMA_UCS_EN            BIT(1)
-#define CDMA_TCS_EN            BIT(0)
-
-#define GDMA_ICS_EN            BIT(22)
-#define GDMA_TCS_EN            BIT(21)
-#define GDMA_UCS_EN            BIT(20)
-
-/* frame engine counters */
-#define MT7621_REG_MIB_OFFSET  0x2000
-#define MT7621_PPE_AC_BCNT0    (MT7621_REG_MIB_OFFSET + 0x00)
-#define MT7621_GDM1_TX_GBCNT   (MT7621_REG_MIB_OFFSET + 0x400)
-#define MT7621_GDM2_TX_GBCNT   (MT7621_GDM1_TX_GBCNT + 0x40)
-
-#define GSW_REG_GDMA1_MAC_ADRL 0x508
-#define GSW_REG_GDMA1_MAC_ADRH 0x50C
-#define GSW_REG_GDMA2_MAC_ADRL 0x1508
-#define GSW_REG_GDMA2_MAC_ADRH 0x150C
-
-#define MT7621_MTK_RST_GL      0x04
-#define MT7620_MTK_INT_STATUS2 0x08
-
-/* The MTK_INT_STATUS reg on mt7620 defines CNT_GDM1_AF at BIT(29),
- * but after testing it should be BIT(13).
- */
-#define MT7621_MTK_GDM1_AF     BIT(28)
-#define MT7621_MTK_GDM2_AF     BIT(29)
-
-static const u16 mt7621_reg_table[MTK_REG_COUNT] = {
-       [MTK_REG_PDMA_GLO_CFG] = RT5350_PDMA_GLO_CFG,
-       [MTK_REG_PDMA_RST_CFG] = RT5350_PDMA_RST_CFG,
-       [MTK_REG_DLY_INT_CFG] = RT5350_DLY_INT_CFG,
-       [MTK_REG_TX_BASE_PTR0] = RT5350_TX_BASE_PTR0,
-       [MTK_REG_TX_MAX_CNT0] = RT5350_TX_MAX_CNT0,
-       [MTK_REG_TX_CTX_IDX0] = RT5350_TX_CTX_IDX0,
-       [MTK_REG_TX_DTX_IDX0] = RT5350_TX_DTX_IDX0,
-       [MTK_REG_RX_BASE_PTR0] = RT5350_RX_BASE_PTR0,
-       [MTK_REG_RX_MAX_CNT0] = RT5350_RX_MAX_CNT0,
-       [MTK_REG_RX_CALC_IDX0] = RT5350_RX_CALC_IDX0,
-       [MTK_REG_RX_DRX_IDX0] = RT5350_RX_DRX_IDX0,
-       [MTK_REG_MTK_INT_ENABLE] = RT5350_MTK_INT_ENABLE,
-       [MTK_REG_MTK_INT_STATUS] = RT5350_MTK_INT_STATUS,
-       [MTK_REG_MTK_DMA_VID_BASE] = 0,
-       [MTK_REG_MTK_COUNTER_BASE] = MT7621_GDM1_TX_GBCNT,
-       [MTK_REG_MTK_RST_GL] = MT7621_MTK_RST_GL,
-       [MTK_REG_MTK_INT_STATUS2] = MT7620_MTK_INT_STATUS2,
-};
-
-static void mt7621_mtk_reset(struct mtk_eth *eth)
-{
-       mtk_reset(eth, MT7621_RESET_FE);
-}
-
-static int mt7621_fwd_config(struct mtk_eth *eth)
-{
-       /* Set up GMAC1 only; there is no support for GMAC2 yet */
-       mtk_w32(eth, mtk_r32(eth, MT7620_GDMA1_FWD_CFG) & ~0xffff,
-               MT7620_GDMA1_FWD_CFG);
-
-       /* Enable RX checksum */
-       mtk_w32(eth, mtk_r32(eth, MT7620_GDMA1_FWD_CFG) | (GDMA_ICS_EN |
-                      GDMA_TCS_EN | GDMA_UCS_EN),
-                      MT7620_GDMA1_FWD_CFG);
-
-       /* Enable RX VLAN offloading */
-       mtk_w32(eth, 0, MT7621_CDMP_EG_CTRL);
-
-       return 0;
-}
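The two accesses above form a read-modify-write pair on MT7620_GDMA1_FWD_CFG; the idiom generalizes to a small helper (illustrative only, not part of the driver):

static void mtk_rmw_sketch(struct mtk_eth *eth, unsigned int reg,
			   u32 clear, u32 set)
{
	u32 val = mtk_r32(eth, reg);

	val &= ~clear;
	val |= set;
	mtk_w32(eth, val, reg);
}

With it, the body above reduces to mtk_rmw_sketch(eth, MT7620_GDMA1_FWD_CFG, 0xffff, GDMA_ICS_EN | GDMA_TCS_EN | GDMA_UCS_EN) followed by the CDMP_EG_CTRL write.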
-
-static void mt7621_set_mac(struct mtk_mac *mac, unsigned char *hwaddr)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&mac->hw->page_lock, flags);
-       if (mac->id == 0) {
-               mtk_w32(mac->hw, (hwaddr[0] << 8) | hwaddr[1],
-                       GSW_REG_GDMA1_MAC_ADRH);
-               mtk_w32(mac->hw, (hwaddr[2] << 24) | (hwaddr[3] << 16) |
-                       (hwaddr[4] << 8) | hwaddr[5],
-                       GSW_REG_GDMA1_MAC_ADRL);
-       }
-       if (mac->id == 1) {
-               mtk_w32(mac->hw, (hwaddr[0] << 8) | hwaddr[1],
-                       GSW_REG_GDMA2_MAC_ADRH);
-               mtk_w32(mac->hw, (hwaddr[2] << 24) | (hwaddr[3] << 16) |
-                       (hwaddr[4] << 8) | hwaddr[5],
-                       GSW_REG_GDMA2_MAC_ADRL);
-       }
-       spin_unlock_irqrestore(&mac->hw->page_lock, flags);
-}
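To make the byte packing concrete, a worked example with an arbitrary address:

/* hwaddr = 00:11:22:33:44:55
 *   ADRH = (0x00 << 8) | 0x11                               = 0x00000011
 *   ADRL = (0x22 << 24) | (0x33 << 16) | (0x44 << 8) | 0x55 = 0x22334455
 * i.e. the two most significant bytes land in GSW_REG_GDMAx_MAC_ADRH and
 * the remaining four in GSW_REG_GDMAx_MAC_ADRL, most significant first.
 */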
-
-static struct mtk_soc_data mt7621_data = {
-       .hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
-                      NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
-                      NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
-                      NETIF_F_IPV6_CSUM,
-       .dma_type = MTK_PDMA,
-       .dma_ring_size = 256,
-       .napi_weight = 64,
-       .new_stats = 1,
-       .padding_64b = 1,
-       .rx_2b_offset = 1,
-       .rx_sg_dma = 1,
-       .has_switch = 1,
-       .mac_count = 2,
-       .reset_fe = mt7621_mtk_reset,
-       .set_mac = mt7621_set_mac,
-       .fwd_config = mt7621_fwd_config,
-       .switch_init = mtk_gsw_init,
-       .reg_table = mt7621_reg_table,
-       .pdma_glo_cfg = MTK_PDMA_SIZE_16DWORDS,
-       .rx_int = RT5350_RX_DONE_INT,
-       .tx_int = RT5350_TX_DONE_INT,
-       .status_int = MT7621_MTK_GDM1_AF | MT7621_MTK_GDM2_AF,
-       .checksum_bit = MT7621_L4_VALID,
-       .has_carrier = mt7620_has_carrier,
-       .mdio_read = mt7620_mdio_read,
-       .mdio_write = mt7620_mdio_write,
-       .mdio_adjust_link = mt7620_mdio_link_adjust,
-};
-
-const struct of_device_id of_mtk_match[] = {
-       { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data },
-       {},
-};
-
-MODULE_DEVICE_TABLE(of, of_mtk_match);
index d33533872a16f1c4e4e3d7207d6cc04cd6a63379..c8fa17cfa807a24f6450034c714ebc94b59507d4 100644 (file)
@@ -1,6 +1,7 @@
 config PCI_MT7621
        tristate "MediaTek MT7621 PCI Controller"
        depends on RALINK
+       depends on PCI
        select PCI_DRIVERS_GENERIC
        help
          This selects a driver for the MediaTek MT7621 PCI Controller.
index d6248eecf123bdc5ad123ac1fd7b5821b9a22e24..2aee64fdaec555abf8734aef11c7d269dca86150 100644 (file)
@@ -163,7 +163,7 @@ int cvm_oct_phy_setup_device(struct net_device *dev)
                goto no_phy;
 
        phydev = of_phy_connect(dev, phy_node, cvm_oct_adjust_link, 0,
-                               PHY_INTERFACE_MODE_GMII);
+                               priv->phy_mode);
        of_node_put(phy_node);
 
        if (!phydev)
index ce61c5670ef645c78e080ab588b0d75c7591378c..986db76705ccc6b5d384f2db08f715739936d942 100644 (file)
@@ -653,14 +653,37 @@ static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
        return np;
 }
 
-static void cvm_set_rgmii_delay(struct device_node *np, int iface, int port)
+static void cvm_set_rgmii_delay(struct octeon_ethernet *priv, int iface,
+                               int port)
 {
+       struct device_node *np = priv->of_node;
        u32 delay_value;
+       bool rx_delay;
+       bool tx_delay;
 
-       if (!of_property_read_u32(np, "rx-delay", &delay_value))
+       /* By default, both RX/TX delay is enabled in
+        * __cvmx_helper_rgmii_enable().
+        */
+       rx_delay = true;
+       tx_delay = true;
+
+       if (!of_property_read_u32(np, "rx-delay", &delay_value)) {
                cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, iface), delay_value);
-       if (!of_property_read_u32(np, "tx-delay", &delay_value))
+               rx_delay = delay_value > 0;
+       }
+       if (!of_property_read_u32(np, "tx-delay", &delay_value)) {
                cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, iface), delay_value);
+               tx_delay = delay_value > 0;
+       }
+
+       if (!rx_delay && !tx_delay)
+               priv->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
+       else if (!rx_delay)
+               priv->phy_mode = PHY_INTERFACE_MODE_RGMII_RXID;
+       else if (!tx_delay)
+               priv->phy_mode = PHY_INTERFACE_MODE_RGMII_TXID;
+       else
+               priv->phy_mode = PHY_INTERFACE_MODE_RGMII;
 }
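The mapping follows the usual RGMII convention: whenever a MAC-side delay is disabled, the PHY is asked to insert it via the matching *_ID interface mode. Summarized from the code above:

  MAC rx-delay   MAC tx-delay   resulting phy_mode
  disabled       disabled       PHY_INTERFACE_MODE_RGMII_ID
  disabled       enabled        PHY_INTERFACE_MODE_RGMII_RXID
  enabled        disabled       PHY_INTERFACE_MODE_RGMII_TXID
  enabled        enabled        PHY_INTERFACE_MODE_RGMII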
 
 static int cvm_oct_probe(struct platform_device *pdev)
@@ -825,6 +848,7 @@ static int cvm_oct_probe(struct platform_device *pdev)
                        priv->port = port;
                        priv->queue = cvmx_pko_get_base_queue(priv->port);
                        priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
+                       priv->phy_mode = PHY_INTERFACE_MODE_NA;
                        for (qos = 0; qos < 16; qos++)
                                skb_queue_head_init(&priv->tx_free_list[qos]);
                        for (qos = 0; qos < cvmx_pko_get_num_queues(port);
@@ -856,6 +880,7 @@ static int cvm_oct_probe(struct platform_device *pdev)
                                break;
 
                        case CVMX_HELPER_INTERFACE_MODE_SGMII:
+                               priv->phy_mode = PHY_INTERFACE_MODE_SGMII;
                                dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
                                strcpy(dev->name, "eth%d");
                                break;
@@ -865,11 +890,16 @@ static int cvm_oct_probe(struct platform_device *pdev)
                                strcpy(dev->name, "spi%d");
                                break;
 
-                       case CVMX_HELPER_INTERFACE_MODE_RGMII:
                        case CVMX_HELPER_INTERFACE_MODE_GMII:
+                               priv->phy_mode = PHY_INTERFACE_MODE_GMII;
+                               dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
+                               strcpy(dev->name, "eth%d");
+                               break;
+
+                       case CVMX_HELPER_INTERFACE_MODE_RGMII:
                                dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
                                strcpy(dev->name, "eth%d");
-                               cvm_set_rgmii_delay(priv->of_node, interface,
+                               cvm_set_rgmii_delay(priv, interface,
                                                    port_index);
                                break;
                        }
index 4a07e7f43d128cb6903891ad6fcbd399b61e0e61..be570d33685add6873f62740f7c333b8920096aa 100644 (file)
@@ -12,7 +12,7 @@
 #define OCTEON_ETHERNET_H
 
 #include <linux/of.h>
-
+#include <linux/phy.h>
 #include <asm/octeon/cvmx-helper-board.h>
 
 /**
@@ -33,6 +33,8 @@ struct octeon_ethernet {
         * cvmx_helper_interface_mode_t
         */
        int imode;
+       /* PHY mode */
+       phy_interface_t phy_mode;
        /* List of outstanding tx buffers per queue */
        struct sk_buff_head tx_free_list[16];
        unsigned int last_speed;
index 80b8d4153414a80d555d1dfe87fb48e77e684e10..a54286498a477fd2b935683f695d6e57ba8382d5 100644 (file)
@@ -45,7 +45,7 @@ static int dcon_init_xo_1(struct dcon_priv *dcon)
 {
        unsigned char lob;
        int ret, i;
-       struct dcon_gpio *pin = &gpios_asis[0];
+       const struct dcon_gpio *pin = &gpios_asis[0];
 
        for (i = 0; i < ARRAY_SIZE(gpios_asis); i++) {
                gpios[i] = devm_gpiod_get(&dcon->client->dev, pin[i].name,
index 1723a47a96b4092fb16df938f9ec1699c220f0f0..952f2ab5134783db8e8978fd79282045a9f65db6 100644 (file)
@@ -174,7 +174,9 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
 
        pxmitpriv->free_xmit_extbuf_cnt = num_xmit_extbuf;
 
-       rtw_alloc_hwxmits(padapter);
+       res = rtw_alloc_hwxmits(padapter);
+       if (res == _FAIL)
+               goto exit;
        rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
 
        for (i = 0; i < 4; i++)
@@ -1503,7 +1505,7 @@ s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe)
        return res;
 }
 
-void rtw_alloc_hwxmits(struct adapter *padapter)
+s32 rtw_alloc_hwxmits(struct adapter *padapter)
 {
        struct hw_xmit *hwxmits;
        struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -1512,6 +1514,8 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
 
        pxmitpriv->hwxmits = kcalloc(pxmitpriv->hwxmit_entry,
                                     sizeof(struct hw_xmit), GFP_KERNEL);
+       if (!pxmitpriv->hwxmits)
+               return _FAIL;
 
        hwxmits = pxmitpriv->hwxmits;
 
@@ -1519,6 +1523,7 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
        hwxmits[1] .sta_queue = &pxmitpriv->vi_pending;
        hwxmits[2] .sta_queue = &pxmitpriv->be_pending;
        hwxmits[3] .sta_queue = &pxmitpriv->bk_pending;
+       return _SUCCESS;
 }
 
 void rtw_free_hwxmits(struct adapter *padapter)
index 788f59c74ea1e45fb7e598a8f83361b586647969..ba7e15fbde72d60ab9f5589756f024642359a82f 100644 (file)
@@ -336,7 +336,7 @@ s32 rtw_txframes_sta_ac_pending(struct adapter *padapter,
 void rtw_init_hwxmits(struct hw_xmit *phwxmit, int entry);
 s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter);
 void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv);
-void rtw_alloc_hwxmits(struct adapter *padapter);
+s32 rtw_alloc_hwxmits(struct adapter *padapter);
 void rtw_free_hwxmits(struct adapter *padapter);
 s32 rtw_xmit(struct adapter *padapter, struct sk_buff **pkt);
 
index 1920d02f7c9f3724cb1516f857875f1bb8a76cfd..8c36acedf50769312ab170c983c192824fcad66a 100644 (file)
@@ -147,17 +147,9 @@ static u8 write_macreg_hdl(struct _adapter *padapter, u8 *pbuf)
 
 static u8 read_bbreg_hdl(struct _adapter *padapter, u8 *pbuf)
 {
-       u32 val;
-       void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj      *pcmd);
        struct cmd_obj *pcmd  = (struct cmd_obj *)pbuf;
 
-       if (pcmd->rsp && pcmd->rspsz > 0)
-               memcpy(pcmd->rsp, (u8 *)&val, pcmd->rspsz);
-       pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
-       if (!pcmd_callback)
-               r8712_free_cmd_obj(pcmd);
-       else
-               pcmd_callback(padapter, pcmd);
+       r8712_free_cmd_obj(pcmd);
        return H2C_SUCCESS;
 }
 
index 92fb77666d4462d411d927d26a7bd58d1503c8ca..1ef86b8c592f1490c41bb5436fb45d4e8e065ed5 100644 (file)
@@ -140,7 +140,7 @@ enum rtl8712_h2c_cmd {
 static struct _cmd_callback    cmd_callback[] = {
        {GEN_CMD_CODE(_Read_MACREG), NULL}, /*0*/
        {GEN_CMD_CODE(_Write_MACREG), NULL},
-       {GEN_CMD_CODE(_Read_BBREG), &r8712_getbbrfreg_cmdrsp_callback},
+       {GEN_CMD_CODE(_Read_BBREG), NULL},
        {GEN_CMD_CODE(_Write_BBREG), NULL},
        {GEN_CMD_CODE(_Read_RFREG), &r8712_getbbrfreg_cmdrsp_callback},
        {GEN_CMD_CODE(_Write_RFREG), NULL}, /*5*/
index 094d61bcb46983226a2cbf8974c5dd2e78758e05..b87f13a0b5639acbbb9d88265a6e810ddfb80924 100644 (file)
@@ -260,7 +260,9 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
                }
        }
 
-       rtw_alloc_hwxmits(padapter);
+       res = rtw_alloc_hwxmits(padapter);
+       if (res == _FAIL)
+               goto exit;
        rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
 
        for (i = 0; i < 4; i++) {
@@ -2144,7 +2146,7 @@ s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe)
        return res;
 }
 
-void rtw_alloc_hwxmits(struct adapter *padapter)
+s32 rtw_alloc_hwxmits(struct adapter *padapter)
 {
        struct hw_xmit *hwxmits;
        struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -2155,10 +2157,8 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
 
        pxmitpriv->hwxmits = rtw_zmalloc(sizeof(struct hw_xmit) * pxmitpriv->hwxmit_entry);
 
-       if (pxmitpriv->hwxmits == NULL) {
-               DBG_871X("alloc hwxmits fail!...\n");
-               return;
-       }
+       if (!pxmitpriv->hwxmits)
+               return _FAIL;
 
        hwxmits = pxmitpriv->hwxmits;
 
@@ -2204,7 +2204,7 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
 
        }
 
-
+       return _SUCCESS;
 }
 
 void rtw_free_hwxmits(struct adapter *padapter)
index 1b38b9182b3165bdfacf97fcc2fa43c34b276468..37f42b2f22f1dcf173b2deeadc904df79f216036 100644 (file)
@@ -487,7 +487,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter);
 void _rtw_free_xmit_priv (struct xmit_priv *pxmitpriv);
 
 
-void rtw_alloc_hwxmits(struct adapter *padapter);
+s32 rtw_alloc_hwxmits(struct adapter *padapter);
 void rtw_free_hwxmits(struct adapter *padapter);
 
 
index 9930ed954abb2d8aa437a1784fbd3e3f83a043ca..4cc77b2016e1e45f14834820b429f7e8cda19b42 100644 (file)
@@ -180,6 +180,8 @@ static int rtl_phydm_init_priv(struct rtl_priv *rtlpriv,
 
        rtlpriv->phydm.internal =
                kzalloc(sizeof(struct phy_dm_struct), GFP_KERNEL);
+       if (!rtlpriv->phydm.internal)
+               return 0;
 
        _rtl_phydm_init_com_info(rtlpriv, ic, params);
 
index f061dd1382aa102e53ac532844c8e2c9959ae568..cf6b7a80b753b35dc2d488e589f97f9d1899fedb 100644 (file)
@@ -743,6 +743,8 @@ void rtl8822be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
                      u1_rsvd_page_loc, 3);
 
        skb = dev_alloc_skb(totalpacketlen);
+       if (!skb)
+               return;
        memcpy((u8 *)skb_put(skb, totalpacketlen), &reserved_page_packet,
               totalpacketlen);
 
index edff6ce8565558f0671a4fb13119488a6dd254c4..9d85a3a1af4c5eadef475ffd20e39e66612955dc 100644 (file)
@@ -210,12 +210,15 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
                return -EINVAL;
 
        spin_lock_irqsave(&speakup_info.spinlock, flags);
+       synth_soft.alive = 1;
        while (1) {
                prepare_to_wait(&speakup_event, &wait, TASK_INTERRUPTIBLE);
-               if (!unicode)
-                       synth_buffer_skip_nonlatin1();
-               if (!synth_buffer_empty() || speakup_info.flushing)
-                       break;
+               if (synth_current() == &synth_soft) {
+                       if (!unicode)
+                               synth_buffer_skip_nonlatin1();
+                       if (!synth_buffer_empty() || speakup_info.flushing)
+                               break;
+               }
                spin_unlock_irqrestore(&speakup_info.spinlock, flags);
                if (fp->f_flags & O_NONBLOCK) {
                        finish_wait(&speakup_event, &wait);
@@ -235,6 +238,8 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
 
        /* Keep 3 bytes available for a 16bit UTF-8-encoded character */
        while (chars_sent <= count - bytes_per_ch) {
+               if (synth_current() != &synth_soft)
+                       break;
                if (speakup_info.flushing) {
                        speakup_info.flushing = 0;
                        ch = '\x18';
@@ -331,7 +336,8 @@ static __poll_t softsynth_poll(struct file *fp, struct poll_table_struct *wait)
        poll_wait(fp, &speakup_event, wait);
 
        spin_lock_irqsave(&speakup_info.spinlock, flags);
-       if (!synth_buffer_empty() || speakup_info.flushing)
+       if (synth_current() == &synth_soft &&
+           (!synth_buffer_empty() || speakup_info.flushing))
                ret = EPOLLIN | EPOLLRDNORM;
        spin_unlock_irqrestore(&speakup_info.spinlock, flags);
        return ret;
index c8e688878fc705a47d88cfa1f4f73e2dceaefdc2..ac6a74883af4753d33906e62c366b3d4f8a1c677 100644 (file)
@@ -74,6 +74,7 @@ int synth_request_region(unsigned long start, unsigned long n);
 int synth_release_region(unsigned long start, unsigned long n);
 int synth_add(struct spk_synth *in_synth);
 void synth_remove(struct spk_synth *in_synth);
+struct spk_synth *synth_current(void);
 
 extern struct speakup_info_t speakup_info;
 
index 25f259ee4ffc74990e5a19c8560840ca9a59058e..3568bfb89912c3316d649b6c19223f4206936457 100644 (file)
@@ -481,4 +481,10 @@ void synth_remove(struct spk_synth *in_synth)
 }
 EXPORT_SYMBOL_GPL(synth_remove);
 
+struct spk_synth *synth_current(void)
+{
+       return synth;
+}
+EXPORT_SYMBOL_GPL(synth_current);
+
 short spk_punc_masks[] = { 0, SOME, MOST, PUNC, PUNC | B_SYM };
index 804daf83be35172ecda66b35a9bbfd7e6e4fc5c2..064d0db4c51ef14af59f908a32df3c94fa768714 100644 (file)
@@ -3513,6 +3513,7 @@ static int vchiq_probe(struct platform_device *pdev)
        struct device_node *fw_node;
        const struct of_device_id *of_id;
        struct vchiq_drvdata *drvdata;
+       struct device *vchiq_dev;
        int err;
 
        of_id = of_match_node(vchiq_of_match, pdev->dev.of_node);
@@ -3547,9 +3548,12 @@ static int vchiq_probe(struct platform_device *pdev)
                goto failed_platform_init;
        }
 
-       if (IS_ERR(device_create(vchiq_class, &pdev->dev, vchiq_devid,
-                                NULL, "vchiq")))
+       vchiq_dev = device_create(vchiq_class, &pdev->dev, vchiq_devid, NULL,
+                                 "vchiq");
+       if (IS_ERR(vchiq_dev)) {
+               err = PTR_ERR(vchiq_dev);
                goto failed_device_create;
+       }
 
        vchiq_debugfs_init();
 
index b370985b58a101f65e561c4cdbc43d51a26d1e19..c6bb4aaf9bd02fc18b6ca9e1bbc7a37e5724c805 100644 (file)
@@ -1033,8 +1033,6 @@ static void vnt_interrupt_process(struct vnt_private *priv)
                return;
        }
 
-       MACvIntDisable(priv->PortOffset);
-
        spin_lock_irqsave(&priv->lock, flags);
 
        /* Read low level stats */
@@ -1122,8 +1120,6 @@ static void vnt_interrupt_process(struct vnt_private *priv)
        }
 
        spin_unlock_irqrestore(&priv->lock, flags);
-
-       MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE);
 }
 
 static void vnt_interrupt_work(struct work_struct *work)
@@ -1133,14 +1129,17 @@ static void vnt_interrupt_work(struct work_struct *work)
 
        if (priv->vif)
                vnt_interrupt_process(priv);
+
+       MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE);
 }
 
 static irqreturn_t vnt_interrupt(int irq,  void *arg)
 {
        struct vnt_private *priv = arg;
 
-       if (priv->vif)
-               schedule_work(&priv->interrupt_work);
+       schedule_work(&priv->interrupt_work);
+
+       MACvIntDisable(priv->PortOffset);
 
        return IRQ_HANDLED;
 }
index db5df3d548188b6c440db8928ac157b5512174bb..3bdd56a1021b26d6e74ff98ad6f0e25f25f5de08 100644 (file)
@@ -49,11 +49,6 @@ struct ar933x_uart_port {
        struct clk              *clk;
 };
 
-static inline bool ar933x_uart_console_enabled(void)
-{
-       return IS_ENABLED(CONFIG_SERIAL_AR933X_CONSOLE);
-}
-
 static inline unsigned int ar933x_uart_read(struct ar933x_uart_port *up,
                                            int offset)
 {
@@ -508,6 +503,7 @@ static const struct uart_ops ar933x_uart_ops = {
        .verify_port    = ar933x_uart_verify_port,
 };
 
+#ifdef CONFIG_SERIAL_AR933X_CONSOLE
 static struct ar933x_uart_port *
 ar933x_console_ports[CONFIG_SERIAL_AR933X_NR_UARTS];
 
@@ -604,14 +600,7 @@ static struct console ar933x_uart_console = {
        .index          = -1,
        .data           = &ar933x_uart_driver,
 };
-
-static void ar933x_uart_add_console_port(struct ar933x_uart_port *up)
-{
-       if (!ar933x_uart_console_enabled())
-               return;
-
-       ar933x_console_ports[up->port.line] = up;
-}
+#endif /* CONFIG_SERIAL_AR933X_CONSOLE */
 
 static struct uart_driver ar933x_uart_driver = {
        .owner          = THIS_MODULE,
@@ -700,7 +689,9 @@ static int ar933x_uart_probe(struct platform_device *pdev)
        baud = ar933x_uart_get_baud(port->uartclk, 0, AR933X_UART_MAX_STEP);
        up->max_baud = min_t(unsigned int, baud, AR933X_UART_MAX_BAUD);
 
-       ar933x_uart_add_console_port(up);
+#ifdef CONFIG_SERIAL_AR933X_CONSOLE
+       ar933x_console_ports[up->port.line] = up;
+#endif
 
        ret = uart_add_one_port(&ar933x_uart_driver, &up->port);
        if (ret)
@@ -749,8 +740,9 @@ static int __init ar933x_uart_init(void)
 {
        int ret;
 
-       if (ar933x_uart_console_enabled())
-               ar933x_uart_driver.cons = &ar933x_uart_console;
+#ifdef CONFIG_SERIAL_AR933X_CONSOLE
+       ar933x_uart_driver.cons = &ar933x_uart_console;
+#endif
 
        ret = uart_register_driver(&ar933x_uart_driver);
        if (ret)
index 05147fe243434a52e4ca827227d95dc1f6a0f2e6..0b4f3690532145da4228b8b8255f0d5da928a31f 100644 (file)
@@ -166,6 +166,8 @@ struct atmel_uart_port {
        unsigned int            pending_status;
        spinlock_t              lock_suspended;
 
+       bool                    hd_start_rx;    /* can start RX during half-duplex operation */
+
        /* ISO7816 */
        unsigned int            fidi_min;
        unsigned int            fidi_max;
@@ -231,6 +233,13 @@ static inline void atmel_uart_write_char(struct uart_port *port, u8 value)
        __raw_writeb(value, port->membase + ATMEL_US_THR);
 }
 
+static inline int atmel_uart_is_half_duplex(struct uart_port *port)
+{
+       return ((port->rs485.flags & SER_RS485_ENABLED) &&
+               !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
+               (port->iso7816.flags & SER_ISO7816_ENABLED);
+}
+
 #ifdef CONFIG_SERIAL_ATMEL_PDC
 static bool atmel_use_pdc_rx(struct uart_port *port)
 {
@@ -608,10 +617,9 @@ static void atmel_stop_tx(struct uart_port *port)
        /* Disable interrupts */
        atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
 
-       if (((port->rs485.flags & SER_RS485_ENABLED) &&
-            !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
-           port->iso7816.flags & SER_ISO7816_ENABLED)
+       if (atmel_uart_is_half_duplex(port))
                atmel_start_rx(port);
+
 }
 
 /*
@@ -628,9 +636,7 @@ static void atmel_start_tx(struct uart_port *port)
                return;
 
        if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port))
-               if (((port->rs485.flags & SER_RS485_ENABLED) &&
-                    !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
-                   port->iso7816.flags & SER_ISO7816_ENABLED)
+               if (atmel_uart_is_half_duplex(port))
                        atmel_stop_rx(port);
 
        if (atmel_use_pdc_tx(port))
@@ -928,11 +934,14 @@ static void atmel_complete_tx_dma(void *arg)
         */
        if (!uart_circ_empty(xmit))
                atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
-       else if (((port->rs485.flags & SER_RS485_ENABLED) &&
-                 !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
-                port->iso7816.flags & SER_ISO7816_ENABLED) {
-               /* DMA done, stop TX, start RX for RS485 */
-               atmel_start_rx(port);
+       else if (atmel_uart_is_half_duplex(port)) {
+               /*
+                * DMA done, re-enable TXEMPTY and signal that we can stop
+                * TX and start RX for RS485
+                */
+               atmel_port->hd_start_rx = true;
+               atmel_uart_writel(port, ATMEL_US_IER,
+                                 atmel_port->tx_done_mask);
        }
 
        spin_unlock_irqrestore(&port->lock, flags);
@@ -1288,6 +1297,10 @@ static int atmel_prepare_rx_dma(struct uart_port *port)
                                         sg_dma_len(&atmel_port->sg_rx)/2,
                                         DMA_DEV_TO_MEM,
                                         DMA_PREP_INTERRUPT);
+       if (!desc) {
+               dev_err(port->dev, "Preparing DMA cyclic failed\n");
+               goto chan_err;
+       }
        desc->callback = atmel_complete_rx_dma;
        desc->callback_param = port;
        atmel_port->desc_rx = desc;
@@ -1376,9 +1389,20 @@ atmel_handle_transmit(struct uart_port *port, unsigned int pending)
        struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
 
        if (pending & atmel_port->tx_done_mask) {
-               /* Either PDC or interrupt transmission */
                atmel_uart_writel(port, ATMEL_US_IDR,
                                  atmel_port->tx_done_mask);
+
+               /* Start RX if flag was set and FIFO is empty */
+               if (atmel_port->hd_start_rx) {
+                       if (!(atmel_uart_readl(port, ATMEL_US_CSR)
+                                       & ATMEL_US_TXEMPTY))
+                               dev_warn(port->dev, "Should start RX, but TX fifo is not empty\n");
+
+                       atmel_port->hd_start_rx = false;
+                       atmel_start_rx(port);
+                       return;
+               }
+
                atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
        }
 }
@@ -1508,9 +1532,7 @@ static void atmel_tx_pdc(struct uart_port *port)
                atmel_uart_writel(port, ATMEL_US_IER,
                                  atmel_port->tx_done_mask);
        } else {
-               if (((port->rs485.flags & SER_RS485_ENABLED) &&
-                    !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
-                   port->iso7816.flags & SER_ISO7816_ENABLED) {
+               if (atmel_uart_is_half_duplex(port)) {
                        /* DMA done, stop TX, start RX for RS485 */
                        atmel_start_rx(port);
                }
index 6fb312e7af713ecd3efcc4c0ef069602635f7681..bfe5e9e034ecf86b3de80476eb90a44a0d228359 100644 (file)
@@ -148,8 +148,10 @@ static int configure_kgdboc(void)
        char *cptr = config;
        struct console *cons;
 
-       if (!strlen(config) || isspace(config[0]))
+       if (!strlen(config) || isspace(config[0])) {
+               err = 0;
                goto noconfig;
+       }
 
        kgdboc_io_ops.is_console = 0;
        kgdb_tty_driver = NULL;
index f5bdde40562750c7695823e93bab799bf5593ad5..450ba6d7996c229e7e3a796439f8e6caa1dbaebb 100644 (file)
@@ -1415,6 +1415,8 @@ static int max310x_spi_probe(struct spi_device *spi)
        if (spi->dev.of_node) {
                const struct of_device_id *of_id =
                        of_match_device(max310x_dt_ids, &spi->dev);
+               if (!of_id)
+                       return -ENODEV;
 
                devtype = (struct max310x_devtype *)of_id->data;
        } else {
index 231f751d1ef48b42e4a9820c73408242862035e9..7e7b1559fa3695406ae80edda49f0f1f7634dc9a 100644 (file)
@@ -810,6 +810,9 @@ static int mvebu_uart_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
+       if (!match)
+               return -ENODEV;
+
        /* Assume that all UART ports have a DT alias or none has */
        id = of_alias_get_id(pdev->dev.of_node, "serial");
        if (!pdev->dev.of_node || id < 0)
index 27235a526cce8c4b59aa14f6764e466b10988748..4c188f4079b3ea68ee51982b41d4b14ba27567c4 100644 (file)
@@ -1686,6 +1686,10 @@ static int mxs_auart_probe(struct platform_device *pdev)
 
        s->port.mapbase = r->start;
        s->port.membase = ioremap(r->start, resource_size(r));
+       if (!s->port.membase) {
+               ret = -ENOMEM;
+               goto out_disable_clks;
+       }
        s->port.ops = &mxs_auart_ops;
        s->port.iotype = UPIO_MEM;
        s->port.fifosize = MXS_AUART_FIFO_SIZE;
index 3bcec1c20219102b277aafc72b548425df33e175..35e5f9c5d5bed48274363343366c8bd76395c500 100644 (file)
@@ -1050,7 +1050,7 @@ static int __init qcom_geni_console_setup(struct console *co, char *options)
 {
        struct uart_port *uport;
        struct qcom_geni_serial_port *port;
-       int baud;
+       int baud = 9600;
        int bits = 8;
        int parity = 'n';
        int flow = 'n';
index 635178cf3eed538aa35bf49225a4886097e2e7b0..09a183dfc52640027bf571184ee4e69e819c5951 100644 (file)
@@ -1507,7 +1507,7 @@ static int __init sc16is7xx_init(void)
        ret = i2c_add_driver(&sc16is7xx_i2c_uart_driver);
        if (ret < 0) {
                pr_err("failed to init sc16is7xx i2c --> %d\n", ret);
-               return ret;
+               goto err_i2c;
        }
 #endif
 
@@ -1515,10 +1515,18 @@ static int __init sc16is7xx_init(void)
        ret = spi_register_driver(&sc16is7xx_spi_uart_driver);
        if (ret < 0) {
                pr_err("failed to init sc16is7xx spi --> %d\n", ret);
-               return ret;
+               goto err_spi;
        }
 #endif
        return ret;
+
+err_spi:
+#ifdef CONFIG_SERIAL_SC16IS7XX_I2C
+       i2c_del_driver(&sc16is7xx_i2c_uart_driver);
+#endif
+err_i2c:
+       uart_unregister_driver(&sc16is7xx_uart);
+       return ret;
 }
 module_init(sc16is7xx_init);
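The fix above is the standard staged-unwind pattern for a module init that registers several backends: each successfully registered stage is torn down in reverse order when a later stage fails. In outline (register_a/b and their counterparts are placeholders, not real APIs):

static int __init staged_init_sketch(void)
{
	int ret;

	ret = register_a();
	if (ret)
		return ret;

	ret = register_b();
	if (ret)
		goto err_a;

	return 0;

err_a:
	unregister_a();
	return ret;
}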
 
index 060fcd42b6d560105a114c9923ce1cdcc177b696..2d1c626312cd8892d5eae0fa65e03d3347a09e81 100644 (file)
@@ -838,19 +838,9 @@ static void sci_transmit_chars(struct uart_port *port)
 
        if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
                uart_write_wakeup(port);
-       if (uart_circ_empty(xmit)) {
+       if (uart_circ_empty(xmit))
                sci_stop_tx(port);
-       } else {
-               ctrl = serial_port_in(port, SCSCR);
-
-               if (port->type != PORT_SCI) {
-                       serial_port_in(port, SCxSR); /* Dummy read */
-                       sci_clear_SCxSR(port, SCxSR_TDxE_CLEAR(port));
-               }
 
-               ctrl |= SCSCR_TIE;
-               serial_port_out(port, SCSCR, ctrl);
-       }
 }
 
 /* On SH3, SCIF may read end-of-break as a space->mark char */
index 044c3cbdcfa40664497d13bd00e607584eff99c7..a9e12b3bc31d7e19966c724b8b31ce3ac64c5242 100644 (file)
@@ -325,7 +325,7 @@ static void tty_port_shutdown(struct tty_port *port, struct tty_struct *tty)
                if (tty && C_HUPCL(tty))
                        tty_port_lower_dtr_rts(port);
 
-               if (port->ops->shutdown)
+               if (port->ops && port->ops->shutdown)
                        port->ops->shutdown(port);
        }
 out:
@@ -398,7 +398,7 @@ EXPORT_SYMBOL_GPL(tty_port_tty_wakeup);
  */
 int tty_port_carrier_raised(struct tty_port *port)
 {
-       if (port->ops->carrier_raised == NULL)
+       if (!port->ops || !port->ops->carrier_raised)
                return 1;
        return port->ops->carrier_raised(port);
 }
@@ -414,7 +414,7 @@ EXPORT_SYMBOL(tty_port_carrier_raised);
  */
 void tty_port_raise_dtr_rts(struct tty_port *port)
 {
-       if (port->ops->dtr_rts)
+       if (port->ops && port->ops->dtr_rts)
                port->ops->dtr_rts(port, 1);
 }
 EXPORT_SYMBOL(tty_port_raise_dtr_rts);
@@ -429,7 +429,7 @@ EXPORT_SYMBOL(tty_port_raise_dtr_rts);
  */
 void tty_port_lower_dtr_rts(struct tty_port *port)
 {
-       if (port->ops->dtr_rts)
+       if (port->ops && port->ops->dtr_rts)
                port->ops->dtr_rts(port, 0);
 }
 EXPORT_SYMBOL(tty_port_lower_dtr_rts);
@@ -684,7 +684,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
 
        if (!tty_port_initialized(port)) {
                clear_bit(TTY_IO_ERROR, &tty->flags);
-               if (port->ops->activate) {
+               if (port->ops && port->ops->activate) {
                        int retval = port->ops->activate(port, tty);
                        if (retval) {
                                mutex_unlock(&port->mutex);
index 739f8960811ac89d6f960a184155f4e0c602101a..ec666eb4b7b445d98cbc3ff59be63c1b7aa90437 100644 (file)
@@ -558,10 +558,8 @@ static void acm_softint(struct work_struct *work)
                clear_bit(EVENT_RX_STALL, &acm->flags);
        }
 
-       if (test_bit(EVENT_TTY_WAKEUP, &acm->flags)) {
+       if (test_and_clear_bit(EVENT_TTY_WAKEUP, &acm->flags))
                tty_port_tty_wakeup(&acm->port);
-               clear_bit(EVENT_TTY_WAKEUP, &acm->flags);
-       }
 }
 
 /*
index 48277bbc15e4d155fc9c1c7315fcf57d6347b51e..73c8e65917461f8f83d9233c96bdf0d2b8956b27 100644 (file)
@@ -145,6 +145,8 @@ enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0)
 
        do {
                controller = of_find_node_with_property(controller, "phys");
+               if (!of_device_is_available(controller))
+                       continue;
                index = 0;
                do {
                        if (arg0 == -1) {
index 3189181bb628d921309d44296da06eff92b5d1bd..975d7c1288e36534bdd08be7e84642acdba535c7 100644 (file)
@@ -2741,6 +2741,9 @@ int usb_add_hcd(struct usb_hcd *hcd,
 
                retval = usb_phy_roothub_set_mode(hcd->phy_roothub,
                                                  PHY_MODE_USB_HOST_SS);
+               if (retval)
+                       retval = usb_phy_roothub_set_mode(hcd->phy_roothub,
+                                                         PHY_MODE_USB_HOST);
                if (retval)
                        goto err_usb_phy_roothub_power_on;
 
index fdc6e4e403e81736db077e0c7cdf212aa6da2874..8cced3609e243b186caedd3eeb79e338c6151a73 100644 (file)
@@ -29,6 +29,7 @@
 #define PCI_DEVICE_ID_INTEL_BXT_M              0x1aaa
 #define PCI_DEVICE_ID_INTEL_APL                        0x5aaa
 #define PCI_DEVICE_ID_INTEL_KBP                        0xa2b0
+#define PCI_DEVICE_ID_INTEL_CMLH               0x02ee
 #define PCI_DEVICE_ID_INTEL_GLK                        0x31aa
 #define PCI_DEVICE_ID_INTEL_CNPLP              0x9dee
 #define PCI_DEVICE_ID_INTEL_CNPH               0xa36e
@@ -305,6 +306,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MRFLD),
          (kernel_ulong_t) &dwc3_pci_mrfld_properties, },
 
+       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CMLH),
+         (kernel_ulong_t) &dwc3_pci_intel_properties, },
+
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SPTLP),
          (kernel_ulong_t) &dwc3_pci_intel_properties, },
 
index 75b113a5b25cb6af28a8d7776e58678f6fcf7202..f3816a5c861eeeafdf1230afc1e7ca8fe41efa55 100644 (file)
@@ -391,20 +391,20 @@ static ssize_t f_hidg_write(struct file *file, const char __user *buffer,
        req->complete = f_hidg_req_complete;
        req->context  = hidg;
 
+       spin_unlock_irqrestore(&hidg->write_spinlock, flags);
+
        status = usb_ep_queue(hidg->in_ep, req, GFP_ATOMIC);
        if (status < 0) {
                ERROR(hidg->func.config->cdev,
                        "usb_ep_queue error on int endpoint %zd\n", status);
-               goto release_write_pending_unlocked;
+               goto release_write_pending;
        } else {
                status = count;
        }
-       spin_unlock_irqrestore(&hidg->write_spinlock, flags);
 
        return status;
 release_write_pending:
        spin_lock_irqsave(&hidg->write_spinlock, flags);
-release_write_pending_unlocked:
        hidg->write_pending = 0;
        spin_unlock_irqrestore(&hidg->write_spinlock, flags);
 
index b77f3126580ebb937986e7ced5b28739dd25dea4..c2011cd7df8cf5fbf0c5a5db153c37a1b0f451a6 100644 (file)
@@ -945,6 +945,7 @@ net2272_dequeue(struct usb_ep *_ep, struct usb_request *_req)
                        break;
        }
        if (&req->req != _req) {
+               ep->stopped = stopped;
                spin_unlock_irqrestore(&ep->dev->lock, flags);
                return -EINVAL;
        }
index f63f82450bf4e4960414eb80b3020fda6bfa8c3f..898339e5df10d83d211942609a9bd695f199e787 100644 (file)
@@ -866,9 +866,6 @@ static void start_queue(struct net2280_ep *ep, u32 dmactl, u32 td_dma)
        (void) readl(&ep->dev->pci->pcimstctl);
 
        writel(BIT(DMA_START), &dma->dmastat);
-
-       if (!ep->is_in)
-               stop_out_naking(ep);
 }
 
 static void start_dma(struct net2280_ep *ep, struct net2280_request *req)
@@ -907,6 +904,7 @@ static void start_dma(struct net2280_ep *ep, struct net2280_request *req)
                        writel(BIT(DMA_START), &dma->dmastat);
                        return;
                }
+               stop_out_naking(ep);
        }
 
        tmp = dmactl_default;
@@ -1275,9 +1273,9 @@ static int net2280_dequeue(struct usb_ep *_ep, struct usb_request *_req)
                        break;
        }
        if (&req->req != _req) {
+               ep->stopped = stopped;
                spin_unlock_irqrestore(&ep->dev->lock, flags);
-               dev_err(&ep->dev->pdev->dev, "%s: Request mismatch\n",
-                                                               __func__);
+               ep_dbg(ep->dev, "%s: Request mismatch\n", __func__);
                return -EINVAL;
        }
 
index 934584f0a20a7bee30adcee141c0a7eb63a51b3f..6343fbacd2442adea634a9911bed82cf1603c417 100644 (file)
@@ -3204,6 +3204,9 @@ static int __init u132_hcd_init(void)
        printk(KERN_INFO "driver %s\n", hcd_name);
        workqueue = create_singlethread_workqueue("u132");
        retval = platform_driver_register(&u132_platform_driver);
+       if (retval)
+               destroy_workqueue(workqueue);
+
        return retval;
 }
 
index c78be578abb065af0e0715352f8c502345907ad7..d932cc31711e8a0a872efaf35d2ab347fa45106d 100644 (file)
@@ -516,7 +516,6 @@ static int xhci_do_dbc_stop(struct xhci_hcd *xhci)
                return -1;
 
        writel(0, &dbc->regs->control);
-       xhci_dbc_mem_cleanup(xhci);
        dbc->state = DS_DISABLED;
 
        return 0;
@@ -562,8 +561,10 @@ static void xhci_dbc_stop(struct xhci_hcd *xhci)
        ret = xhci_do_dbc_stop(xhci);
        spin_unlock_irqrestore(&dbc->lock, flags);
 
-       if (!ret)
+       if (!ret) {
+               xhci_dbc_mem_cleanup(xhci);
                pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller);
+       }
 }
 
 static void
index e2eece6936556b06be37e43a29a3c8554c722203..96a740543183729bb702244151ebb95d88acd97f 100644 (file)
@@ -1545,20 +1545,25 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
        port_index = max_ports;
        while (port_index--) {
                u32 t1, t2;
-
+               int retries = 10;
+retry:
                t1 = readl(ports[port_index]->addr);
                t2 = xhci_port_state_to_neutral(t1);
                portsc_buf[port_index] = 0;
 
-               /* Bail out if a USB3 port has a new device in link training */
-               if ((hcd->speed >= HCD_USB3) &&
+               /*
+                * Give a USB3 port in link training time to finish, but don't
+                * prevent suspend as the port might be stuck
+                */
+               if ((hcd->speed >= HCD_USB3) && retries-- &&
                    (t1 & PORT_PLS_MASK) == XDEV_POLLING) {
-                       bus_state->bus_suspended = 0;
                        spin_unlock_irqrestore(&xhci->lock, flags);
-                       xhci_dbg(xhci, "Bus suspend bailout, port in polling\n");
-                       return -EBUSY;
+                       msleep(XHCI_PORT_POLLING_LFPS_TIME);
+                       spin_lock_irqsave(&xhci->lock, flags);
+                       xhci_dbg(xhci, "port %d polling in bus suspend, waiting\n",
+                                port_index);
+                       goto retry;
                }
-
                /* suspend ports in U0, or bail out for new connect changes */
                if ((t1 & PORT_PE) && (t1 & PORT_PLS_MASK) == XDEV_U0) {
                        if ((t1 & PORT_CSC) && wake_enabled) {
index a6e4637157799769cc0f77b6b2f815c7c4ad6490..671bce18782c5a788ad1af896ab9066fd4078839 100644 (file)
@@ -246,6 +246,7 @@ int xhci_rcar_init_quirk(struct usb_hcd *hcd)
        if (!xhci_rcar_wait_for_pll_active(hcd))
                return -ETIMEDOUT;
 
+       xhci->quirks |= XHCI_TRUST_TX_LENGTH;
        return xhci_rcar_download_firmware(hcd);
 }
 
index 40fa25c4d0419851bac800bdd74d29bb6f7e0fee..9215a28dad406a724959f0315a0444525d0edb90 100644 (file)
@@ -1647,10 +1647,13 @@ static void handle_port_status(struct xhci_hcd *xhci,
                }
        }
 
-       if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_U0 &&
-                       DEV_SUPERSPEED_ANY(portsc)) {
+       if ((portsc & PORT_PLC) &&
+           DEV_SUPERSPEED_ANY(portsc) &&
+           ((portsc & PORT_PLS_MASK) == XDEV_U0 ||
+            (portsc & PORT_PLS_MASK) == XDEV_U1 ||
+            (portsc & PORT_PLS_MASK) == XDEV_U2)) {
                xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
-               /* We've just brought the device into U0 through either the
+               /* We've just brought the device into U0/1/2 through either the
                 * Resume state after a device remote wakeup, or through the
                 * U3Exit state after a host-initiated resume.  If it's a device
                 * initiated remote wake, don't pass up the link state change,
index 652dc36e30129c9f15a703b640a52c8f2685a82c..9334cdee382a67a8b783b13b9c88a2d1dd4c3da3 100644 (file)
@@ -452,6 +452,14 @@ struct xhci_op_regs {
  */
 #define XHCI_DEFAULT_BESL      4
 
+/*
+ * The USB3 specification defines a 360 ms tPollingLFPSTimeout for USB3 ports
+ * to complete link training. Usually link training completes much faster,
+ * so check status 10 times with a 36 ms sleep (10 * 36 ms = 360 ms) in
+ * places where we need to wait for polling to complete.
+ */
+#define XHCI_PORT_POLLING_LFPS_TIME  36
+
 /**
  * struct xhci_intr_reg - Interrupt Register Set
  * @irq_pending:       IMAN - Interrupt Management Register.  Used to enable
index 4d72b7d1d383be2643d09756f11f709a8b1906a5..04684849d68320862a4de40d5de5e5db4487cd7e 100644 (file)
@@ -547,7 +547,7 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
         */
        hub->port_swap = USB251XB_DEF_PORT_SWAP;
        of_property_for_each_u32(np, "swap-dx-lanes", prop, p, port) {
-               if ((port >= 0) && (port <= data->port_cnt))
+               if (port <= data->port_cnt)
                        hub->port_swap |= BIT(port);
        }
 
@@ -612,7 +612,7 @@ static int usb251xb_probe(struct usb251xb *hub)
                                                           dev);
        int err;
 
-       if (np) {
+       if (np && of_id) {
                err = usb251xb_get_ofdata(hub,
                                          (struct usb251xb_data *)of_id->data);
                if (err) {
index bcc23486c4ed2813da698e14faf0e11366577afb..928c2cd6fc0084ef0feb7f79edf6d577e8fa5a46 100644 (file)
@@ -6,6 +6,7 @@ config USB_MTU3
        tristate "MediaTek USB3 Dual Role controller"
        depends on USB || USB_GADGET
        depends on ARCH_MEDIATEK || COMPILE_TEST
+       depends on EXTCON || !EXTCON
        select USB_XHCI_MTK if USB_SUPPORT && USB_XHCI_HCD
        help
          Say Y or M here if your system runs on MediaTek SoCs with
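The added "depends on EXTCON || !EXTCON" line is the standard Kconfig idiom for an optional dependency: EXTCON may still be disabled entirely, but USB_MTU3=y with EXTCON=m is ruled out, since built-in code cannot link against a modular extcon.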
index fffe23ab0189a00b1a7747662c9248cfd41770ae..979bef9bfb6bc7189e2c16d8dc00ae4ae82d4854 100644 (file)
@@ -80,6 +80,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x804E) }, /* Software Bisque Paramount ME built-in converter */
        { USB_DEVICE(0x10C4, 0x8053) }, /* Enfora EDG1228 */
        { USB_DEVICE(0x10C4, 0x8054) }, /* Enfora GSM2228 */
+       { USB_DEVICE(0x10C4, 0x8056) }, /* Lorenz Messtechnik devices */
        { USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */
        { USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */
        { USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */
index 8f5b1747175945f8830a909803acdc10b48f7de4..1d8461ae2c340324f64c7796a5be644037f84ddb 100644 (file)
@@ -609,6 +609,8 @@ static const struct usb_device_id id_table_combined[] = {
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
        { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID),
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+       { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLX_PLUS_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_NT_ORION_IO_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2_PID) },
index b863bedb55a138b7a99abe0a82aab39e10b79e3f..5755f0df002589403366a75acec50a40fc86b955 100644 (file)
 /*
  * NovaTech product ids (FTDI_VID)
  */
-#define FTDI_NT_ORIONLXM_PID   0x7c90  /* OrionLXm Substation Automation Platform */
+#define FTDI_NT_ORIONLXM_PID           0x7c90  /* OrionLXm Substation Automation Platform */
+#define FTDI_NT_ORIONLX_PLUS_PID       0x7c91  /* OrionLX+ Substation Automation Platform */
+#define FTDI_NT_ORION_IO_PID           0x7c92  /* Orion I/O */
 
 /*
  * Synapse Wireless product ids (FTDI_VID)
index fc52ac75fbf66f0f2e7ac2f8951b8df6ea2ba49f..18110225d50606abaefe2e0c90490ffff0888f41 100644 (file)
@@ -366,8 +366,6 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
        if (!urbtrack)
                return -ENOMEM;
 
-       kref_get(&mos_parport->ref_count);
-       urbtrack->mos_parport = mos_parport;
        urbtrack->urb = usb_alloc_urb(0, GFP_ATOMIC);
        if (!urbtrack->urb) {
                kfree(urbtrack);
@@ -388,6 +386,8 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
                             usb_sndctrlpipe(usbdev, 0),
                             (unsigned char *)urbtrack->setup,
                             NULL, 0, async_complete, urbtrack);
+       kref_get(&mos_parport->ref_count);
+       urbtrack->mos_parport = mos_parport;
        kref_init(&urbtrack->ref_count);
        INIT_LIST_HEAD(&urbtrack->urblist_entry);
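Moving the kref_get() and mos_parport assignment below the allocations means the early error paths, which simply kfree() the tracker, no longer return with the parport reference count left elevated and nothing to ever drop it.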
 
index 11b21d9410f35306d299339d7a29554aa9cf47e3..83869065b8022ba68b145db6756cc6f2f9e3b941 100644 (file)
@@ -246,6 +246,7 @@ static void option_instat_callback(struct urb *urb);
 #define QUECTEL_PRODUCT_EC25                   0x0125
 #define QUECTEL_PRODUCT_BG96                   0x0296
 #define QUECTEL_PRODUCT_EP06                   0x0306
+#define QUECTEL_PRODUCT_EM12                   0x0512
 
 #define CMOTECH_VENDOR_ID                      0x16d8
 #define CMOTECH_PRODUCT_6001                   0x6001
@@ -1066,7 +1067,8 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = RSVD(3) },
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
-       { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
+       { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000), /* SIMCom SIM5218 */
+         .driver_info = NCTRL(0) | NCTRL(1) | NCTRL(2) | NCTRL(3) | RSVD(4) },
        /* Quectel products using Qualcomm vendor ID */
        { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
        { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
@@ -1087,6 +1089,9 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
          .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff),
+         .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
@@ -1940,10 +1945,12 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = RSVD(4) },
        { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff),                     /* D-Link DWM-222 */
          .driver_info = RSVD(4) },
-       { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
-       { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
-       { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
-       { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) },                /* OLICARD300 - MT6225 */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) },    /* D-Link DWM-152/C1 */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) },    /* D-Link DWM-156/C1 */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) },    /* D-Link DWM-156/A3 */
+       { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 0xff),                     /* Olicard 600 */
+         .driver_info = RSVD(4) },
+       { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) },                   /* OLICARD300 - MT6225 */
        { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
        { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
        { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
index 0f62db091d8dab59416fb70af97a44a127503d05..a2233d72ae7c9a919d86f4e0bc39ea26d6cc6aba 100644 (file)
@@ -37,6 +37,7 @@
        S(SRC_ATTACHED),                        \
        S(SRC_STARTUP),                         \
        S(SRC_SEND_CAPABILITIES),               \
+       S(SRC_SEND_CAPABILITIES_TIMEOUT),       \
        S(SRC_NEGOTIATE_CAPABILITIES),          \
        S(SRC_TRANSITION_SUPPLY),               \
        S(SRC_READY),                           \
@@ -2966,10 +2967,34 @@ static void run_state_machine(struct tcpm_port *port)
                        /* port->hard_reset_count = 0; */
                        port->caps_count = 0;
                        port->pd_capable = true;
-                       tcpm_set_state_cond(port, hard_reset_state(port),
+                       tcpm_set_state_cond(port, SRC_SEND_CAPABILITIES_TIMEOUT,
                                            PD_T_SEND_SOURCE_CAP);
                }
                break;
+       case SRC_SEND_CAPABILITIES_TIMEOUT:
+               /*
+                * Error recovery for a PD_DATA_SOURCE_CAP reply timeout.
+                *
+                * PD 2.0 sinks are supposed to accept src-capabilities with a
+                * 3.0 header and simply ignore any src PDOs which the sink
+                * does not understand, such as PPS, but some 2.0 sinks instead
+                * ignore the entire PD_DATA_SOURCE_CAP message, causing
+                * contract negotiation to fail.
+                * negotiation to fail.
+                *
+                * After PD_N_HARD_RESET_COUNT hard-reset attempts, we try
+                * sending src-capabilities with a lower PD revision to
+                * make these broken sinks work.
+                */
+               if (port->hard_reset_count < PD_N_HARD_RESET_COUNT) {
+                       tcpm_set_state(port, HARD_RESET_SEND, 0);
+               } else if (port->negotiated_rev > PD_REV20) {
+                       port->negotiated_rev--;
+                       port->hard_reset_count = 0;
+                       tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
+               } else {
+                       tcpm_set_state(port, hard_reset_state(port), 0);
+               }
+               break;
        case SRC_NEGOTIATE_CAPABILITIES:
                ret = tcpm_pd_check_request(port);
                if (ret < 0) {
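For reference, the recovery ladder the new state implements (an illustrative trace assuming the spec value PD_N_HARD_RESET_COUNT = 2, not driver output):

	caps sent, no Request within PD_T_SEND_SOURCE_CAP
	    -> SRC_SEND_CAPABILITIES_TIMEOUT
	hard_reset_count < 2    -> HARD_RESET_SEND, try again
	still at PD rev 3.0     -> drop to PD_REV20, clear the counter,
	                           re-enter SRC_SEND_CAPABILITIES
	already at PD_REV20     -> fall back to hard_reset_state(port)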
index 423208e19383c0c2cd414d3b627b8f4c48b6f67b..6770afd4076548eeb0021eef062160013b97b1ea 100644 (file)
@@ -615,8 +615,13 @@ static int wcove_typec_probe(struct platform_device *pdev)
        wcove->dev = &pdev->dev;
        wcove->regmap = pmic->regmap;
 
-       irq = regmap_irq_get_virq(pmic->irq_chip_data_chgr,
-                                 platform_get_irq(pdev, 0));
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0) {
+               dev_err(&pdev->dev, "Failed to get IRQ: %d\n", irq);
+               return irq;
+       }
+
+       irq = regmap_irq_get_virq(pmic->irq_chip_data_chgr, irq);
        if (irq < 0)
                return irq;
 
index a25659b5a5d17d97272589a2dd1ac7979ed55070..3fa20e95a6bb6446fb2c4aa3d71abf75b611ce33 100644 (file)
@@ -1661,11 +1661,11 @@ static void __init vfio_pci_fill_ids(void)
                rc = pci_add_dynid(&vfio_pci_driver, vendor, device,
                                   subvendor, subdevice, class, class_mask, 0);
                if (rc)
-                       pr_warn("failed to add dynamic id [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x (%d)\n",
+                       pr_warn("failed to add dynamic id [%04x:%04x[%04x:%04x]] class %#08x/%08x (%d)\n",
                                vendor, device, subvendor, subdevice,
                                class, class_mask, rc);
                else
-                       pr_info("add [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x\n",
+                       pr_info("add [%04x:%04x[%04x:%04x]] class %#08x/%08x\n",
                                vendor, device, subvendor, subdevice,
                                class, class_mask);
        }
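For context: variadic arguments are integer-promoted, so the %hx length modifier never changed the output here, and newer compilers (clang's -Wformat in particular) warn about the mismatch; plain %x matches the promoted type.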
index 8dbb270998f47121dc0886151cb6e47da8a8e211..6b64e45a52691ffd9dd4809d0e127fc8c450cf80 100644 (file)
@@ -1398,7 +1398,7 @@ static void tce_iommu_detach_group(void *iommu_data,
        mutex_unlock(&container->lock);
 }
 
-const struct vfio_iommu_driver_ops tce_iommu_driver_ops = {
+static const struct vfio_iommu_driver_ops tce_iommu_driver_ops = {
        .name           = "iommu-vfio-powerpc",
        .owner          = THIS_MODULE,
        .open           = tce_iommu_open,
index 73652e21efec6a28393bd979d2d42caef711b280..d0f731c9920a65a44d614181ecf3a4e4c2d90755 100644 (file)
@@ -58,12 +58,18 @@ module_param_named(disable_hugepages,
 MODULE_PARM_DESC(disable_hugepages,
                 "Disable VFIO IOMMU support for IOMMU hugepages.");
 
+static unsigned int dma_entry_limit __read_mostly = U16_MAX;
+module_param_named(dma_entry_limit, dma_entry_limit, uint, 0644);
+MODULE_PARM_DESC(dma_entry_limit,
+                "Maximum number of user DMA mappings per container (65535).");
+
 struct vfio_iommu {
        struct list_head        domain_list;
        struct vfio_domain      *external_domain; /* domain for external user */
        struct mutex            lock;
        struct rb_root          dma_list;
        struct blocking_notifier_head notifier;
+       unsigned int            dma_avail;
        bool                    v2;
        bool                    nesting;
 };
@@ -836,6 +842,7 @@ static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
        vfio_unlink_dma(iommu, dma);
        put_task_struct(dma->task);
        kfree(dma);
+       iommu->dma_avail++;
 }
 
 static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
@@ -1081,12 +1088,18 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
                goto out_unlock;
        }
 
+       if (!iommu->dma_avail) {
+               ret = -ENOSPC;
+               goto out_unlock;
+       }
+
        dma = kzalloc(sizeof(*dma), GFP_KERNEL);
        if (!dma) {
                ret = -ENOMEM;
                goto out_unlock;
        }
 
+       iommu->dma_avail--;
        dma->iova = iova;
        dma->vaddr = vaddr;
        dma->prot = prot;
@@ -1583,6 +1596,7 @@ static void *vfio_iommu_type1_open(unsigned long arg)
 
        INIT_LIST_HEAD(&iommu->domain_list);
        iommu->dma_list = RB_ROOT;
+       iommu->dma_avail = dma_entry_limit;
        mutex_init(&iommu->lock);
        BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier);
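A usage sketch for the new knob (the value below is hypothetical): the cap defaults to 65535 mappings per container and can be raised at module load, e.g. "modprobe vfio_iommu_type1 dma_entry_limit=131072"; because the parameter is registered with mode 0644 it can also be changed later via /sys/module/vfio_iommu_type1/parameters/dma_entry_limit, taking effect for containers opened afterwards.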
 
index 5ace833de74620bf1a089186057d766e7e3def63..351af88231ada1145bfb72326f905bfaac3819ca 100644 (file)
@@ -911,8 +911,12 @@ static int vhost_new_umem_range(struct vhost_umem *umem,
                                u64 start, u64 size, u64 end,
                                u64 userspace_addr, int perm)
 {
-       struct vhost_umem_node *tmp, *node = kmalloc(sizeof(*node), GFP_ATOMIC);
+       struct vhost_umem_node *tmp, *node;
 
+       if (!size)
+               return -EFAULT;
+
+       node = kmalloc(sizeof(*node), GFP_ATOMIC);
        if (!node)
                return -ENOMEM;
 
index df7d09409efe3a9512495b6c718ba0bbbebb39b3..8ca333f21292ee7dcb611591aed0e6f03421341b 100644 (file)
 
 #define GUEST_MAPPINGS_TRIES   5
 
+#define VBG_KERNEL_REQUEST \
+       (VMMDEV_REQUESTOR_KERNEL | VMMDEV_REQUESTOR_USR_DRV | \
+        VMMDEV_REQUESTOR_CON_DONT_KNOW | VMMDEV_REQUESTOR_TRUST_NOT_GIVEN)
+
 /**
  * Reserves memory in which the VMM can relocate any guest mappings
  * that are floating around.
@@ -48,7 +52,8 @@ static void vbg_guest_mappings_init(struct vbg_dev *gdev)
        int i, rc;
 
        /* Query the required space. */
-       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO);
+       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO,
+                           VBG_KERNEL_REQUEST);
        if (!req)
                return;
 
@@ -135,7 +140,8 @@ static void vbg_guest_mappings_exit(struct vbg_dev *gdev)
         * Tell the host that we're going to free the memory we reserved for
          * it, then free it up. (Leak the memory if anything goes wrong here.)
         */
-       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO);
+       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO,
+                           VBG_KERNEL_REQUEST);
        if (!req)
                return;
 
@@ -172,8 +178,10 @@ static int vbg_report_guest_info(struct vbg_dev *gdev)
        struct vmmdev_guest_info2 *req2 = NULL;
        int rc, ret = -ENOMEM;
 
-       req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO);
-       req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2);
+       req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO,
+                            VBG_KERNEL_REQUEST);
+       req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2,
+                            VBG_KERNEL_REQUEST);
        if (!req1 || !req2)
                goto out_free;
 
@@ -187,8 +195,8 @@ static int vbg_report_guest_info(struct vbg_dev *gdev)
        req2->additions_minor = VBG_VERSION_MINOR;
        req2->additions_build = VBG_VERSION_BUILD;
        req2->additions_revision = VBG_SVN_REV;
-       /* (no features defined yet) */
-       req2->additions_features = 0;
+       req2->additions_features =
+               VMMDEV_GUEST_INFO2_ADDITIONS_FEATURES_REQUESTOR_INFO;
        strlcpy(req2->name, VBG_VERSION_STRING,
                sizeof(req2->name));
 
@@ -230,7 +238,8 @@ static int vbg_report_driver_status(struct vbg_dev *gdev, bool active)
        struct vmmdev_guest_status *req;
        int rc;
 
-       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS);
+       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS,
+                           VBG_KERNEL_REQUEST);
        if (!req)
                return -ENOMEM;
 
@@ -423,7 +432,8 @@ static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled)
        struct vmmdev_heartbeat *req;
        int rc;
 
-       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE);
+       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE,
+                           VBG_KERNEL_REQUEST);
        if (!req)
                return -ENOMEM;
 
@@ -457,7 +467,8 @@ static int vbg_heartbeat_init(struct vbg_dev *gdev)
 
        gdev->guest_heartbeat_req = vbg_req_alloc(
                                        sizeof(*gdev->guest_heartbeat_req),
-                                       VMMDEVREQ_GUEST_HEARTBEAT);
+                                       VMMDEVREQ_GUEST_HEARTBEAT,
+                                       VBG_KERNEL_REQUEST);
        if (!gdev->guest_heartbeat_req)
                return -ENOMEM;
 
@@ -528,7 +539,8 @@ static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
        struct vmmdev_mask *req;
        int rc;
 
-       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK);
+       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK,
+                           VBG_KERNEL_REQUEST);
        if (!req)
                return -ENOMEM;
 
@@ -567,8 +579,14 @@ static int vbg_set_session_event_filter(struct vbg_dev *gdev,
        u32 changed, previous;
        int rc, ret = 0;
 
-       /* Allocate a request buffer before taking the spinlock */
-       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK);
+       /*
+        * Allocate a request buffer before taking the spinlock; when
+        * the session is being terminated the requestor is the kernel,
+        * as we're cleaning up.
+        */
+       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK,
+                           session_termination ? VBG_KERNEL_REQUEST :
+                                                 session->requestor);
        if (!req) {
                if (!session_termination)
                        return -ENOMEM;
@@ -627,7 +645,8 @@ static int vbg_reset_host_capabilities(struct vbg_dev *gdev)
        struct vmmdev_mask *req;
        int rc;
 
-       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES);
+       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
+                           VBG_KERNEL_REQUEST);
        if (!req)
                return -ENOMEM;
 
@@ -662,8 +681,14 @@ static int vbg_set_session_capabilities(struct vbg_dev *gdev,
        u32 changed, previous;
        int rc, ret = 0;
 
-       /* Allocate a request buffer before taking the spinlock */
-       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES);
+       /*
+        * Allocate a request buffer before taking the spinlock; when
+        * the session is being terminated the requestor is the kernel,
+        * as we're cleaning up.
+        */
+       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
+                           session_termination ? VBG_KERNEL_REQUEST :
+                                                 session->requestor);
        if (!req) {
                if (!session_termination)
                        return -ENOMEM;
@@ -722,7 +747,8 @@ static int vbg_query_host_version(struct vbg_dev *gdev)
        struct vmmdev_host_version *req;
        int rc, ret;
 
-       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION);
+       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION,
+                           VBG_KERNEL_REQUEST);
        if (!req)
                return -ENOMEM;
 
@@ -783,19 +809,24 @@ int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events)
 
        gdev->mem_balloon.get_req =
                vbg_req_alloc(sizeof(*gdev->mem_balloon.get_req),
-                             VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ);
+                             VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ,
+                             VBG_KERNEL_REQUEST);
        gdev->mem_balloon.change_req =
                vbg_req_alloc(sizeof(*gdev->mem_balloon.change_req),
-                             VMMDEVREQ_CHANGE_MEMBALLOON);
+                             VMMDEVREQ_CHANGE_MEMBALLOON,
+                             VBG_KERNEL_REQUEST);
        gdev->cancel_req =
                vbg_req_alloc(sizeof(*(gdev->cancel_req)),
-                             VMMDEVREQ_HGCM_CANCEL2);
+                             VMMDEVREQ_HGCM_CANCEL2,
+                             VBG_KERNEL_REQUEST);
        gdev->ack_events_req =
                vbg_req_alloc(sizeof(*gdev->ack_events_req),
-                             VMMDEVREQ_ACKNOWLEDGE_EVENTS);
+                             VMMDEVREQ_ACKNOWLEDGE_EVENTS,
+                             VBG_KERNEL_REQUEST);
        gdev->mouse_status_req =
                vbg_req_alloc(sizeof(*gdev->mouse_status_req),
-                             VMMDEVREQ_GET_MOUSE_STATUS);
+                             VMMDEVREQ_GET_MOUSE_STATUS,
+                             VBG_KERNEL_REQUEST);
 
        if (!gdev->mem_balloon.get_req || !gdev->mem_balloon.change_req ||
            !gdev->cancel_req || !gdev->ack_events_req ||
@@ -892,9 +923,9 @@ void vbg_core_exit(struct vbg_dev *gdev)
  * vboxguest_linux.c calls this when userspace opens the char-device.
  * Return: A pointer to the new session or an ERR_PTR on error.
  * @gdev:              The Guest extension device.
- * @user:              Set if this is a session for the vboxuser device.
+ * @requestor:         VMMDEV_REQUESTOR_* flags
  */
-struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user)
+struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, u32 requestor)
 {
        struct vbg_session *session;
 
@@ -903,7 +934,7 @@ struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user)
                return ERR_PTR(-ENOMEM);
 
        session->gdev = gdev;
-       session->user_session = user;
+       session->requestor = requestor;
 
        return session;
 }
@@ -924,7 +955,9 @@ void vbg_core_close_session(struct vbg_session *session)
                if (!session->hgcm_client_ids[i])
                        continue;
 
-               vbg_hgcm_disconnect(gdev, session->hgcm_client_ids[i], &rc);
+               /* requestor is kernel here, as we're cleaning up. */
+               vbg_hgcm_disconnect(gdev, VBG_KERNEL_REQUEST,
+                                   session->hgcm_client_ids[i], &rc);
        }
 
        kfree(session);
@@ -1152,7 +1185,8 @@ static int vbg_req_allowed(struct vbg_dev *gdev, struct vbg_session *session,
                return -EPERM;
        }
 
-       if (trusted_apps_only && session->user_session) {
+       if (trusted_apps_only &&
+           (session->requestor & VMMDEV_REQUESTOR_USER_DEVICE)) {
                vbg_err("Denying userspace vmm call type %#08x through vboxuser device node\n",
                        req->request_type);
                return -EPERM;
@@ -1209,8 +1243,8 @@ static int vbg_ioctl_hgcm_connect(struct vbg_dev *gdev,
        if (i >= ARRAY_SIZE(session->hgcm_client_ids))
                return -EMFILE;
 
-       ret = vbg_hgcm_connect(gdev, &conn->u.in.loc, &client_id,
-                              &conn->hdr.rc);
+       ret = vbg_hgcm_connect(gdev, session->requestor, &conn->u.in.loc,
+                              &client_id, &conn->hdr.rc);
 
        mutex_lock(&gdev->session_mutex);
        if (ret == 0 && conn->hdr.rc >= 0) {
@@ -1251,7 +1285,8 @@ static int vbg_ioctl_hgcm_disconnect(struct vbg_dev *gdev,
        if (i >= ARRAY_SIZE(session->hgcm_client_ids))
                return -EINVAL;
 
-       ret = vbg_hgcm_disconnect(gdev, client_id, &disconn->hdr.rc);
+       ret = vbg_hgcm_disconnect(gdev, session->requestor, client_id,
+                                 &disconn->hdr.rc);
 
        mutex_lock(&gdev->session_mutex);
        if (ret == 0 && disconn->hdr.rc >= 0)
@@ -1313,12 +1348,12 @@ static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev,
        }
 
        if (IS_ENABLED(CONFIG_COMPAT) && f32bit)
-               ret = vbg_hgcm_call32(gdev, client_id,
+               ret = vbg_hgcm_call32(gdev, session->requestor, client_id,
                                      call->function, call->timeout_ms,
                                      VBG_IOCTL_HGCM_CALL_PARMS32(call),
                                      call->parm_count, &call->hdr.rc);
        else
-               ret = vbg_hgcm_call(gdev, client_id,
+               ret = vbg_hgcm_call(gdev, session->requestor, client_id,
                                    call->function, call->timeout_ms,
                                    VBG_IOCTL_HGCM_CALL_PARMS(call),
                                    call->parm_count, &call->hdr.rc);
@@ -1408,6 +1443,7 @@ static int vbg_ioctl_check_balloon(struct vbg_dev *gdev,
 }
 
 static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
+                                    struct vbg_session *session,
                                     struct vbg_ioctl_write_coredump *dump)
 {
        struct vmmdev_write_core_dump *req;
@@ -1415,7 +1451,8 @@ static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
        if (vbg_ioctl_chk(&dump->hdr, sizeof(dump->u.in), 0))
                return -EINVAL;
 
-       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP);
+       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP,
+                           session->requestor);
        if (!req)
                return -ENOMEM;
 
@@ -1476,7 +1513,7 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
        case VBG_IOCTL_CHECK_BALLOON:
                return vbg_ioctl_check_balloon(gdev, data);
        case VBG_IOCTL_WRITE_CORE_DUMP:
-               return vbg_ioctl_write_core_dump(gdev, data);
+               return vbg_ioctl_write_core_dump(gdev, session, data);
        }
 
        /* Variable sized requests. */
@@ -1508,7 +1545,8 @@ int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features)
        struct vmmdev_mouse_status *req;
        int rc;
 
-       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS);
+       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS,
+                           VBG_KERNEL_REQUEST);
        if (!req)
                return -ENOMEM;
 
index 7ad9ec45bfa9d649627f45e9410aebff43cd22c7..4188c12b839f7e74f845cc9524c1917b188dae95 100644 (file)
@@ -154,15 +154,15 @@ struct vbg_session {
         * host. Protected by vbg_gdev.session_mutex.
         */
        u32 guest_caps;
-       /** Does this session belong to a root process or a user one? */
-       bool user_session;
+       /** VMMDEV_REQUESTOR_* flags */
+       u32 requestor;
        /** Set on CANCEL_ALL_WAITEVENTS, protected by vbg_devevent_spinlock. */
        bool cancel_waiters;
 };
 
 int  vbg_core_init(struct vbg_dev *gdev, u32 fixed_events);
 void vbg_core_exit(struct vbg_dev *gdev);
-struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user);
+struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, u32 requestor);
 void vbg_core_close_session(struct vbg_session *session);
 int  vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data);
 int  vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features);
@@ -172,12 +172,13 @@ irqreturn_t vbg_core_isr(int irq, void *dev_id);
 void vbg_linux_mouse_event(struct vbg_dev *gdev);
 
 /* Private (non-exported) functions from vboxguest_utils.c */
-void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type);
+void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type,
+                   u32 requestor);
 void vbg_req_free(void *req, size_t len);
 int vbg_req_perform(struct vbg_dev *gdev, void *req);
 int vbg_hgcm_call32(
-       struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms,
-       struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count,
-       int *vbox_status);
+       struct vbg_dev *gdev, u32 requestor, u32 client_id, u32 function,
+       u32 timeout_ms, struct vmmdev_hgcm_function_parameter32 *parm32,
+       u32 parm_count, int *vbox_status);
 
 #endif
index 6e2a9619192d2317f8f449fbb5f9c24d0699e3f8..6e8c0f1c1056296e983fd70af5de7c405392c3ee 100644 (file)
@@ -5,6 +5,7 @@
  * Copyright (C) 2006-2016 Oracle Corporation
  */
 
+#include <linux/cred.h>
 #include <linux/input.h>
 #include <linux/kernel.h>
 #include <linux/miscdevice.h>
@@ -28,6 +29,23 @@ static DEFINE_MUTEX(vbg_gdev_mutex);
 /** Global vbg_gdev pointer used by vbg_get/put_gdev. */
 static struct vbg_dev *vbg_gdev;
 
+static u32 vbg_misc_device_requestor(struct inode *inode)
+{
+       u32 requestor = VMMDEV_REQUESTOR_USERMODE |
+                       VMMDEV_REQUESTOR_CON_DONT_KNOW |
+                       VMMDEV_REQUESTOR_TRUST_NOT_GIVEN;
+
+       if (from_kuid(current_user_ns(), current->cred->uid) == 0)
+               requestor |= VMMDEV_REQUESTOR_USR_ROOT;
+       else
+               requestor |= VMMDEV_REQUESTOR_USR_USER;
+
+       if (in_egroup_p(inode->i_gid))
+               requestor |= VMMDEV_REQUESTOR_GRP_VBOX;
+
+       return requestor;
+}
+
 static int vbg_misc_device_open(struct inode *inode, struct file *filp)
 {
        struct vbg_session *session;
@@ -36,7 +54,7 @@ static int vbg_misc_device_open(struct inode *inode, struct file *filp)
        /* misc_open sets filp->private_data to our misc device */
        gdev = container_of(filp->private_data, struct vbg_dev, misc_device);
 
-       session = vbg_core_open_session(gdev, false);
+       session = vbg_core_open_session(gdev, vbg_misc_device_requestor(inode));
        if (IS_ERR(session))
                return PTR_ERR(session);
 
@@ -53,7 +71,8 @@ static int vbg_misc_device_user_open(struct inode *inode, struct file *filp)
        gdev = container_of(filp->private_data, struct vbg_dev,
                            misc_device_user);
 
-       session = vbg_core_open_session(gdev, false);
+       session = vbg_core_open_session(gdev, vbg_misc_device_requestor(inode) |
+                                             VMMDEV_REQUESTOR_USER_DEVICE);
        if (IS_ERR(session))
                return PTR_ERR(session);
 
@@ -115,7 +134,8 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req,
                         req == VBG_IOCTL_VMMDEV_REQUEST_BIG;
 
        if (is_vmmdev_req)
-               buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT);
+               buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT,
+                                   session->requestor);
        else
                buf = kmalloc(size, GFP_KERNEL);
        if (!buf)
index bf4474214b4d31bb708c3d9c302d6ce415e17c7a..75fd140b02ff8aa41816a1f0284a4926c214df7e 100644 (file)
@@ -62,7 +62,8 @@ VBG_LOG(vbg_err, pr_err);
 VBG_LOG(vbg_debug, pr_debug);
 #endif
 
-void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type)
+void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type,
+                   u32 requestor)
 {
        struct vmmdev_request_header *req;
        int order = get_order(PAGE_ALIGN(len));
@@ -78,7 +79,7 @@ void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type)
        req->request_type = req_type;
        req->rc = VERR_GENERAL_FAILURE;
        req->reserved1 = 0;
-       req->reserved2 = 0;
+       req->requestor = requestor;
 
        return req;
 }
@@ -119,7 +120,7 @@ static bool hgcm_req_done(struct vbg_dev *gdev,
        return done;
 }
 
-int vbg_hgcm_connect(struct vbg_dev *gdev,
+int vbg_hgcm_connect(struct vbg_dev *gdev, u32 requestor,
                     struct vmmdev_hgcm_service_location *loc,
                     u32 *client_id, int *vbox_status)
 {
@@ -127,7 +128,7 @@ int vbg_hgcm_connect(struct vbg_dev *gdev,
        int rc;
 
        hgcm_connect = vbg_req_alloc(sizeof(*hgcm_connect),
-                                    VMMDEVREQ_HGCM_CONNECT);
+                                    VMMDEVREQ_HGCM_CONNECT, requestor);
        if (!hgcm_connect)
                return -ENOMEM;
 
@@ -153,13 +154,15 @@ int vbg_hgcm_connect(struct vbg_dev *gdev,
 }
 EXPORT_SYMBOL(vbg_hgcm_connect);
 
-int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status)
+int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 requestor,
+                       u32 client_id, int *vbox_status)
 {
        struct vmmdev_hgcm_disconnect *hgcm_disconnect = NULL;
        int rc;
 
        hgcm_disconnect = vbg_req_alloc(sizeof(*hgcm_disconnect),
-                                       VMMDEVREQ_HGCM_DISCONNECT);
+                                       VMMDEVREQ_HGCM_DISCONNECT,
+                                       requestor);
        if (!hgcm_disconnect)
                return -ENOMEM;
 
@@ -593,9 +596,10 @@ static int hgcm_call_copy_back_result(
        return 0;
 }
 
-int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
-                 u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms,
-                 u32 parm_count, int *vbox_status)
+int vbg_hgcm_call(struct vbg_dev *gdev, u32 requestor, u32 client_id,
+                 u32 function, u32 timeout_ms,
+                 struct vmmdev_hgcm_function_parameter *parms, u32 parm_count,
+                 int *vbox_status)
 {
        struct vmmdev_hgcm_call *call;
        void **bounce_bufs = NULL;
@@ -615,7 +619,7 @@ int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
                goto free_bounce_bufs;
        }
 
-       call = vbg_req_alloc(size, VMMDEVREQ_HGCM_CALL);
+       call = vbg_req_alloc(size, VMMDEVREQ_HGCM_CALL, requestor);
        if (!call) {
                ret = -ENOMEM;
                goto free_bounce_bufs;
@@ -647,9 +651,9 @@ EXPORT_SYMBOL(vbg_hgcm_call);
 
 #ifdef CONFIG_COMPAT
 int vbg_hgcm_call32(
-       struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms,
-       struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count,
-       int *vbox_status)
+       struct vbg_dev *gdev, u32 requestor, u32 client_id, u32 function,
+       u32 timeout_ms, struct vmmdev_hgcm_function_parameter32 *parm32,
+       u32 parm_count, int *vbox_status)
 {
        struct vmmdev_hgcm_function_parameter *parm64 = NULL;
        u32 i, size;
@@ -689,7 +693,7 @@ int vbg_hgcm_call32(
                        goto out_free;
        }
 
-       ret = vbg_hgcm_call(gdev, client_id, function, timeout_ms,
+       ret = vbg_hgcm_call(gdev, requestor, client_id, function, timeout_ms,
                            parm64, parm_count, vbox_status);
        if (ret < 0)
                goto out_free;
index 77f0c8f8a23112f1d3c16b237514237767bab398..84834dad38d5c431d161607989080e758bb62b7b 100644 (file)
@@ -9,11 +9,10 @@
 #ifndef __VBOX_VERSION_H__
 #define __VBOX_VERSION_H__
 
-/* Last synced October 4th 2017 */
-#define VBG_VERSION_MAJOR 5
-#define VBG_VERSION_MINOR 2
+#define VBG_VERSION_MAJOR 6
+#define VBG_VERSION_MINOR 0
 #define VBG_VERSION_BUILD 0
-#define VBG_SVN_REV 68940
-#define VBG_VERSION_STRING "5.2.0"
+#define VBG_SVN_REV 127566
+#define VBG_VERSION_STRING "6.0.0"
 
 #endif
index 5e2ae978935de3630cfda2184534bb838620ea6f..6337b8d75d960bdefc5c8119a187d185ff918dfb 100644 (file)
@@ -98,8 +98,8 @@ struct vmmdev_request_header {
        s32 rc;
        /** Reserved field no.1. MBZ. */
        u32 reserved1;
-       /** Reserved field no.2. MBZ. */
-       u32 reserved2;
+       /** IN: Requestor information (VMMDEV_REQUESTOR_*) */
+       u32 requestor;
 };
 VMMDEV_ASSERT_SIZE(vmmdev_request_header, 24);
 
@@ -247,6 +247,8 @@ struct vmmdev_guest_info {
 };
 VMMDEV_ASSERT_SIZE(vmmdev_guest_info, 24 + 8);
 
+#define VMMDEV_GUEST_INFO2_ADDITIONS_FEATURES_REQUESTOR_INFO   BIT(0)
+
 /** struct vmmdev_guestinfo2 - Guest information report, version 2. */
 struct vmmdev_guest_info2 {
        /** Header. */
@@ -259,7 +261,7 @@ struct vmmdev_guest_info2 {
        u32 additions_build;
        /** SVN revision. */
        u32 additions_revision;
-       /** Feature mask, currently unused. */
+       /** Feature mask. */
        u32 additions_features;
        /**
         * The intentional meaning of this field was:
index d0584c040c60f3a8f1a8b48004ec66074eebdcc8..7a0398bb84f77e520aeb8113c2ac77ef7ef6e0f2 100644 (file)
@@ -255,9 +255,11 @@ void vp_del_vqs(struct virtio_device *vdev)
        for (i = 0; i < vp_dev->msix_used_vectors; ++i)
                free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev);
 
-       for (i = 0; i < vp_dev->msix_vectors; i++)
-               if (vp_dev->msix_affinity_masks[i])
-                       free_cpumask_var(vp_dev->msix_affinity_masks[i]);
+       if (vp_dev->msix_affinity_masks) {
+               for (i = 0; i < vp_dev->msix_vectors; i++)
+                       if (vp_dev->msix_affinity_masks[i])
+                               free_cpumask_var(vp_dev->msix_affinity_masks[i]);
+       }
 
        if (vp_dev->msix_enabled) {
                /* Disable the vector used for configuration */
index 18846afb39da189e3f0dd0168727e0cf2865549e..5df92c308286dc0f5afe203cf279adb3d8af8185 100644 (file)
@@ -882,6 +882,8 @@ static struct virtqueue *vring_create_virtqueue_split(
                                          GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
                if (queue)
                        break;
+               if (!may_reduce_num)
+                       return NULL;
        }
 
        if (!num)
index de01a6d0059dc4adcb98a24197750f72b0b4ceaf..a1c61e351d3f7ee5cb8e82ba4a391559e6a8aef2 100644 (file)
@@ -140,8 +140,7 @@ static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma)
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;
 
-       vma_priv = kzalloc(sizeof(*vma_priv) + count * sizeof(void *),
-                          GFP_KERNEL);
+       vma_priv = kzalloc(struct_size(vma_priv, pages, count), GFP_KERNEL);
        if (!vma_priv)
                return -ENOMEM;
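For context: struct_size(vma_priv, pages, count) from <linux/overflow.h> evaluates to sizeof(*vma_priv) plus count times the size of a pages[] element, saturating instead of wrapping on overflow, so the replacement matches the open-coded arithmetic while staying safe against a huge count.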
 
index c3e201025ef015b49703cf311e71f1d1f041ec6e..0782ff3c227352e7b92b595960cc62aee5c2c4ce 100644 (file)
@@ -622,9 +622,7 @@ static int xenbus_file_open(struct inode *inode, struct file *filp)
        if (xen_store_evtchn == 0)
                return -ENOENT;
 
-       nonseekable_open(inode, filp);
-
-       filp->f_mode &= ~FMODE_ATOMIC_POS; /* cdev-style semantics */
+       stream_open(inode, filp);
 
        u = kzalloc(sizeof(*u), GFP_KERNEL);
        if (u == NULL)
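stream_open() bundles exactly the two steps removed here: it marks the file non-seekable and clears FMODE_ATOMIC_POS, giving the cdev-style stream semantics the old comment described.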
index ca08c83168f5fbf1f7f6b52c8c3ff769bf70cf04..0b37867b5c202332b66ba5bede2a31e4287a23e0 100644 (file)
@@ -1515,8 +1515,8 @@ static int afs_fs_setattr_size64(struct afs_fs_cursor *fc, struct iattr *attr)
 
        xdr_encode_AFS_StoreStatus(&bp, attr);
 
-       *bp++ = 0;                              /* position of start of write */
-       *bp++ = 0;
+       *bp++ = htonl(attr->ia_size >> 32);     /* position of start of write */
+       *bp++ = htonl((u32) attr->ia_size);
        *bp++ = 0;                              /* size of write */
        *bp++ = 0;
        *bp++ = htonl(attr->ia_size >> 32);     /* new file length */
@@ -1564,7 +1564,7 @@ static int afs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr)
 
        xdr_encode_AFS_StoreStatus(&bp, attr);
 
-       *bp++ = 0;                              /* position of start of write */
+       *bp++ = htonl(attr->ia_size);           /* position of start of write */
        *bp++ = 0;                              /* size of write */
        *bp++ = htonl(attr->ia_size);           /* new file length */
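A worked example of the 64-bit split above (illustrative numbers): truncating to ia_size = 0x100000200 now encodes the write position as the big-endian word pair 0x00000001, 0x00000200, whereas previously both words were hard-coded to zero regardless of the requested size.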
 
index 2c588f9bbbda226ec64fa0670e9c92c700f259e6..c14001b42d200ffe0daa63c287e02fbab5f213a4 100644 (file)
@@ -610,6 +610,7 @@ static long afs_wait_for_call_to_complete(struct afs_call *call,
        bool stalled = false;
        u64 rtt;
        u32 life, last_life;
+       bool rxrpc_complete = false;
 
        DECLARE_WAITQUEUE(myself, current);
 
@@ -621,7 +622,7 @@ static long afs_wait_for_call_to_complete(struct afs_call *call,
                rtt2 = 2;
 
        timeout = rtt2;
-       last_life = rxrpc_kernel_check_life(call->net->socket, call->rxcall);
+       rxrpc_kernel_check_life(call->net->socket, call->rxcall, &last_life);
 
        add_wait_queue(&call->waitq, &myself);
        for (;;) {
@@ -639,7 +640,12 @@ static long afs_wait_for_call_to_complete(struct afs_call *call,
                if (afs_check_call_state(call, AFS_CALL_COMPLETE))
                        break;
 
-               life = rxrpc_kernel_check_life(call->net->socket, call->rxcall);
+               if (!rxrpc_kernel_check_life(call->net->socket, call->rxcall, &life)) {
+                       /* rxrpc terminated the call. */
+                       rxrpc_complete = true;
+                       break;
+               }
+
                if (timeout == 0 &&
                    life == last_life && signal_pending(current)) {
                        if (stalled)
@@ -663,12 +669,16 @@ static long afs_wait_for_call_to_complete(struct afs_call *call,
        remove_wait_queue(&call->waitq, &myself);
        __set_current_state(TASK_RUNNING);
 
-       /* Kill off the call if it's still live. */
        if (!afs_check_call_state(call, AFS_CALL_COMPLETE)) {
-               _debug("call interrupted");
-               if (rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
-                                           RX_USER_ABORT, -EINTR, "KWI"))
-                       afs_set_call_complete(call, -EINTR, 0);
+               if (rxrpc_complete) {
+                       afs_set_call_complete(call, call->error, call->abort_code);
+               } else {
+                       /* Kill off the call if it's still live. */
+                       _debug("call interrupted");
+                       if (rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
+                                                   RX_USER_ABORT, -EINTR, "KWI"))
+                               afs_set_call_complete(call, -EINTR, 0);
+               }
        }
 
        spin_lock_bh(&call->state_lock);
index 5aa57929e8c23559c41b8a875f3ea2db43a364dc..6e97a42d24d130471a97a28510ec3712605c50cd 100644 (file)
@@ -1514,7 +1514,7 @@ static int yfs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr)
        bp = xdr_encode_u32(bp, 0); /* RPC flags */
        bp = xdr_encode_YFSFid(bp, &vnode->fid);
        bp = xdr_encode_YFS_StoreStatus(bp, attr);
-       bp = xdr_encode_u64(bp, 0);             /* position of start of write */
+       bp = xdr_encode_u64(bp, attr->ia_size); /* position of start of write */
        bp = xdr_encode_u64(bp, 0);             /* size of write */
        bp = xdr_encode_u64(bp, attr->ia_size); /* new file length */
        yfs_check_req(call, bp);
index 38b741aef0bf5a93513f1ad1f8ab61b9de7c8078..3490d1fa0e16f4f1f189727661e18696ab3a7a08 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -181,7 +181,7 @@ struct poll_iocb {
        struct file             *file;
        struct wait_queue_head  *head;
        __poll_t                events;
-       bool                    woken;
+       bool                    done;
        bool                    cancelled;
        struct wait_queue_entry wait;
        struct work_struct      work;
@@ -204,8 +204,7 @@ struct aio_kiocb {
        struct kioctx           *ki_ctx;
        kiocb_cancel_fn         *ki_cancel;
 
-       struct iocb __user      *ki_user_iocb;  /* user's aiocb */
-       __u64                   ki_user_data;   /* user's data for completion */
+       struct io_event         ki_res;
 
        struct list_head        ki_list;        /* the aio core uses this
                                                 * for cancellation */
@@ -1022,6 +1021,9 @@ static bool get_reqs_available(struct kioctx *ctx)
 /* aio_get_req
  *     Allocate a slot for an aio request.
  * Returns NULL if no requests are free.
+ *
+ * The refcount is initialized to 2: one for the async op completion and
+ * one for the synchronous code that does this.
  */
 static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
 {
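To make the new lifetime rule concrete, a minimal sketch of how the two references pair up (illustrative flow, not the actual submission path):

	req = aio_get_req(ctx);		/* refcount == 2 */
	/* ... arm the async op: aio_read(), aio_poll(), ... */
	iocb_put(req);			/* completion side: 2 -> 1 */
	iocb_put(req);			/* submission side: 1 -> 0, which runs
					 * aio_complete() then iocb_destroy() */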
@@ -1031,10 +1033,15 @@ static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
        if (unlikely(!req))
                return NULL;
 
+       if (unlikely(!get_reqs_available(ctx))) {
+               kmem_cache_free(kiocb_cachep, req);
+               return NULL;
+       }
+
        percpu_ref_get(&ctx->reqs);
        req->ki_ctx = ctx;
        INIT_LIST_HEAD(&req->ki_list);
-       refcount_set(&req->ki_refcnt, 0);
+       refcount_set(&req->ki_refcnt, 2);
        req->ki_eventfd = NULL;
        return req;
 }
@@ -1067,30 +1074,20 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
        return ret;
 }
 
-static inline void iocb_put(struct aio_kiocb *iocb)
-{
-       if (refcount_read(&iocb->ki_refcnt) == 0 ||
-           refcount_dec_and_test(&iocb->ki_refcnt)) {
-               if (iocb->ki_filp)
-                       fput(iocb->ki_filp);
-               percpu_ref_put(&iocb->ki_ctx->reqs);
-               kmem_cache_free(kiocb_cachep, iocb);
-       }
-}
-
-static void aio_fill_event(struct io_event *ev, struct aio_kiocb *iocb,
-                          long res, long res2)
+static inline void iocb_destroy(struct aio_kiocb *iocb)
 {
-       ev->obj = (u64)(unsigned long)iocb->ki_user_iocb;
-       ev->data = iocb->ki_user_data;
-       ev->res = res;
-       ev->res2 = res2;
+       if (iocb->ki_eventfd)
+               eventfd_ctx_put(iocb->ki_eventfd);
+       if (iocb->ki_filp)
+               fput(iocb->ki_filp);
+       percpu_ref_put(&iocb->ki_ctx->reqs);
+       kmem_cache_free(kiocb_cachep, iocb);
 }
 
 /* aio_complete
  *     Called when the io request on the given iocb is complete.
  */
-static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
+static void aio_complete(struct aio_kiocb *iocb)
 {
        struct kioctx   *ctx = iocb->ki_ctx;
        struct aio_ring *ring;
@@ -1114,14 +1111,14 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
        ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
        event = ev_page + pos % AIO_EVENTS_PER_PAGE;
 
-       aio_fill_event(event, iocb, res, res2);
+       *event = iocb->ki_res;
 
        kunmap_atomic(ev_page);
        flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
 
-       pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n",
-                ctx, tail, iocb, iocb->ki_user_iocb, iocb->ki_user_data,
-                res, res2);
+       pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx, tail, iocb,
+                (void __user *)(unsigned long)iocb->ki_res.obj,
+                iocb->ki_res.data, iocb->ki_res.res, iocb->ki_res.res2);
 
        /* after flagging the request as done, we
         * must never even look at it again
@@ -1148,10 +1145,8 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
         * eventfd. The eventfd_signal() function is safe to be called
         * from IRQ context.
         */
-       if (iocb->ki_eventfd) {
+       if (iocb->ki_eventfd)
                eventfd_signal(iocb->ki_eventfd, 1);
-               eventfd_ctx_put(iocb->ki_eventfd);
-       }
 
        /*
         * We have to order our ring_info tail store above and test
@@ -1163,7 +1158,14 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
 
        if (waitqueue_active(&ctx->wait))
                wake_up(&ctx->wait);
-       iocb_put(iocb);
+}
+
+static inline void iocb_put(struct aio_kiocb *iocb)
+{
+       if (refcount_dec_and_test(&iocb->ki_refcnt)) {
+               aio_complete(iocb);
+               iocb_destroy(iocb);
+       }
 }
 
 /* aio_read_events_ring
@@ -1437,7 +1439,9 @@ static void aio_complete_rw(struct kiocb *kiocb, long res, long res2)
                file_end_write(kiocb->ki_filp);
        }
 
-       aio_complete(iocb, res, res2);
+       iocb->ki_res.res = res;
+       iocb->ki_res.res2 = res2;
+       iocb_put(iocb);
 }
 
 static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
@@ -1514,13 +1518,13 @@ static inline void aio_rw_done(struct kiocb *req, ssize_t ret)
        }
 }
 
-static ssize_t aio_read(struct kiocb *req, const struct iocb *iocb,
+static int aio_read(struct kiocb *req, const struct iocb *iocb,
                        bool vectored, bool compat)
 {
        struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
        struct iov_iter iter;
        struct file *file;
-       ssize_t ret;
+       int ret;
 
        ret = aio_prep_rw(req, iocb);
        if (ret)
@@ -1542,13 +1546,13 @@ static ssize_t aio_read(struct kiocb *req, const struct iocb *iocb,
        return ret;
 }
 
-static ssize_t aio_write(struct kiocb *req, const struct iocb *iocb,
+static int aio_write(struct kiocb *req, const struct iocb *iocb,
                         bool vectored, bool compat)
 {
        struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
        struct iov_iter iter;
        struct file *file;
-       ssize_t ret;
+       int ret;
 
        ret = aio_prep_rw(req, iocb);
        if (ret)
@@ -1585,11 +1589,10 @@ static ssize_t aio_write(struct kiocb *req, const struct iocb *iocb,
 
 static void aio_fsync_work(struct work_struct *work)
 {
-       struct fsync_iocb *req = container_of(work, struct fsync_iocb, work);
-       int ret;
+       struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, fsync.work);
 
-       ret = vfs_fsync(req->file, req->datasync);
-       aio_complete(container_of(req, struct aio_kiocb, fsync), ret, 0);
+       iocb->ki_res.res = vfs_fsync(iocb->fsync.file, iocb->fsync.datasync);
+       iocb_put(iocb);
 }
 
 static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
@@ -1608,11 +1611,6 @@ static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
        return 0;
 }
 
-static inline void aio_poll_complete(struct aio_kiocb *iocb, __poll_t mask)
-{
-       aio_complete(iocb, mangle_poll(mask), 0);
-}
-
 static void aio_poll_complete_work(struct work_struct *work)
 {
        struct poll_iocb *req = container_of(work, struct poll_iocb, work);
@@ -1638,9 +1636,11 @@ static void aio_poll_complete_work(struct work_struct *work)
                return;
        }
        list_del_init(&iocb->ki_list);
+       iocb->ki_res.res = mangle_poll(mask);
+       req->done = true;
        spin_unlock_irq(&ctx->ctx_lock);
 
-       aio_poll_complete(iocb, mask);
+       iocb_put(iocb);
 }
 
 /* assumes we are called with irqs disabled */
@@ -1668,31 +1668,27 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
        __poll_t mask = key_to_poll(key);
        unsigned long flags;
 
-       req->woken = true;
-
        /* for instances that support it check for an event match first: */
-       if (mask) {
-               if (!(mask & req->events))
-                       return 0;
+       if (mask && !(mask & req->events))
+               return 0;
+
+       list_del_init(&req->wait.entry);
 
+       if (mask && spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
                /*
                 * Try to complete the iocb inline if we can. Use
                 * irqsave/irqrestore because not all filesystems (e.g. fuse)
                 * call this function with IRQs disabled and because IRQs
                 * have to be disabled before ctx_lock is obtained.
                 */
-               if (spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
-                       list_del(&iocb->ki_list);
-                       spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags);
-
-                       list_del_init(&req->wait.entry);
-                       aio_poll_complete(iocb, mask);
-                       return 1;
-               }
+               list_del(&iocb->ki_list);
+               iocb->ki_res.res = mangle_poll(mask);
+               req->done = true;
+               spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags);
+               iocb_put(iocb);
+       } else {
+               schedule_work(&req->work);
        }
-
-       list_del_init(&req->wait.entry);
-       schedule_work(&req->work);
        return 1;
 }
 
@@ -1719,11 +1715,12 @@ aio_poll_queue_proc(struct file *file, struct wait_queue_head *head,
        add_wait_queue(head, &pt->iocb->poll.wait);
 }
 
-static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
+static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
 {
        struct kioctx *ctx = aiocb->ki_ctx;
        struct poll_iocb *req = &aiocb->poll;
        struct aio_poll_table apt;
+       bool cancel = false;
        __poll_t mask;
 
        /* reject any unknown events outside the normal event mask. */
@@ -1737,7 +1734,7 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
        req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
 
        req->head = NULL;
-       req->woken = false;
+       req->done = false;
        req->cancelled = false;
 
        apt.pt._qproc = aio_poll_queue_proc;
@@ -1749,156 +1746,135 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
        INIT_LIST_HEAD(&req->wait.entry);
        init_waitqueue_func_entry(&req->wait, aio_poll_wake);
 
-       /* one for removal from waitqueue, one for this function */
-       refcount_set(&aiocb->ki_refcnt, 2);
-
        mask = vfs_poll(req->file, &apt.pt) & req->events;
-       if (unlikely(!req->head)) {
-               /* we did not manage to set up a waitqueue, done */
-               goto out;
-       }
-
        spin_lock_irq(&ctx->ctx_lock);
-       spin_lock(&req->head->lock);
-       if (req->woken) {
-               /* wake_up context handles the rest */
-               mask = 0;
+       if (likely(req->head)) {
+               spin_lock(&req->head->lock);
+               if (unlikely(list_empty(&req->wait.entry))) {
+                       if (apt.error)
+                               cancel = true;
+                       apt.error = 0;
+                       mask = 0;
+               }
+               if (mask || apt.error) {
+                       list_del_init(&req->wait.entry);
+               } else if (cancel) {
+                       WRITE_ONCE(req->cancelled, true);
+               } else if (!req->done) { /* actually waiting for an event */
+                       list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
+                       aiocb->ki_cancel = aio_poll_cancel;
+               }
+               spin_unlock(&req->head->lock);
+       }
+       if (mask) { /* no async, we'd stolen it */
+               aiocb->ki_res.res = mangle_poll(mask);
                apt.error = 0;
-       } else if (mask || apt.error) {
-               /* if we get an error or a mask we are done */
-               WARN_ON_ONCE(list_empty(&req->wait.entry));
-               list_del_init(&req->wait.entry);
-       } else {
-               /* actually waiting for an event */
-               list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
-               aiocb->ki_cancel = aio_poll_cancel;
        }
-       spin_unlock(&req->head->lock);
        spin_unlock_irq(&ctx->ctx_lock);
-
-out:
-       if (unlikely(apt.error))
-               return apt.error;
-
        if (mask)
-               aio_poll_complete(aiocb, mask);
-       iocb_put(aiocb);
-       return 0;
+               iocb_put(aiocb);
+       return apt.error;
 }
 
 static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
-                          struct iocb __user *user_iocb, bool compat)
+                          struct iocb __user *user_iocb, struct aio_kiocb *req,
+                          bool compat)
 {
-       struct aio_kiocb *req;
-       ssize_t ret;
-
-       /* enforce forwards compatibility on users */
-       if (unlikely(iocb->aio_reserved2)) {
-               pr_debug("EINVAL: reserve field set\n");
-               return -EINVAL;
-       }
-
-       /* prevent overflows */
-       if (unlikely(
-           (iocb->aio_buf != (unsigned long)iocb->aio_buf) ||
-           (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) ||
-           ((ssize_t)iocb->aio_nbytes < 0)
-          )) {
-               pr_debug("EINVAL: overflow check\n");
-               return -EINVAL;
-       }
-
-       if (!get_reqs_available(ctx))
-               return -EAGAIN;
-
-       ret = -EAGAIN;
-       req = aio_get_req(ctx);
-       if (unlikely(!req))
-               goto out_put_reqs_available;
-
        req->ki_filp = fget(iocb->aio_fildes);
-       ret = -EBADF;
        if (unlikely(!req->ki_filp))
-               goto out_put_req;
+               return -EBADF;
 
        if (iocb->aio_flags & IOCB_FLAG_RESFD) {
+               struct eventfd_ctx *eventfd;
                /*
                 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
                 * instance of the file* now. The file descriptor must be
                 * an eventfd() fd, and will be signaled for each completed
                 * event using the eventfd_signal() function.
                 */
-               req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd);
-               if (IS_ERR(req->ki_eventfd)) {
-                       ret = PTR_ERR(req->ki_eventfd);
-                       req->ki_eventfd = NULL;
-                       goto out_put_req;
-               }
+               eventfd = eventfd_ctx_fdget(iocb->aio_resfd);
+               if (IS_ERR(eventfd))
+                       return PTR_ERR(eventfd);
+
+               req->ki_eventfd = eventfd;
        }
 
-       ret = put_user(KIOCB_KEY, &user_iocb->aio_key);
-       if (unlikely(ret)) {
+       if (unlikely(put_user(KIOCB_KEY, &user_iocb->aio_key))) {
                pr_debug("EFAULT: aio_key\n");
-               goto out_put_req;
+               return -EFAULT;
        }
 
-       req->ki_user_iocb = user_iocb;
-       req->ki_user_data = iocb->aio_data;
+       req->ki_res.obj = (u64)(unsigned long)user_iocb;
+       req->ki_res.data = iocb->aio_data;
+       req->ki_res.res = 0;
+       req->ki_res.res2 = 0;
 
        switch (iocb->aio_lio_opcode) {
        case IOCB_CMD_PREAD:
-               ret = aio_read(&req->rw, iocb, false, compat);
-               break;
+               return aio_read(&req->rw, iocb, false, compat);
        case IOCB_CMD_PWRITE:
-               ret = aio_write(&req->rw, iocb, false, compat);
-               break;
+               return aio_write(&req->rw, iocb, false, compat);
        case IOCB_CMD_PREADV:
-               ret = aio_read(&req->rw, iocb, true, compat);
-               break;
+               return aio_read(&req->rw, iocb, true, compat);
        case IOCB_CMD_PWRITEV:
-               ret = aio_write(&req->rw, iocb, true, compat);
-               break;
+               return aio_write(&req->rw, iocb, true, compat);
        case IOCB_CMD_FSYNC:
-               ret = aio_fsync(&req->fsync, iocb, false);
-               break;
+               return aio_fsync(&req->fsync, iocb, false);
        case IOCB_CMD_FDSYNC:
-               ret = aio_fsync(&req->fsync, iocb, true);
-               break;
+               return aio_fsync(&req->fsync, iocb, true);
        case IOCB_CMD_POLL:
-               ret = aio_poll(req, iocb);
-               break;
+               return aio_poll(req, iocb);
        default:
                pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode);
-               ret = -EINVAL;
-               break;
+               return -EINVAL;
        }
-
-       /*
-        * If ret is 0, we'd either done aio_complete() ourselves or have
-        * arranged for that to be done asynchronously.  Anything non-zero
-        * means that we need to destroy req ourselves.
-        */
-       if (ret)
-               goto out_put_req;
-       return 0;
-out_put_req:
-       if (req->ki_eventfd)
-               eventfd_ctx_put(req->ki_eventfd);
-       iocb_put(req);
-out_put_reqs_available:
-       put_reqs_available(ctx, 1);
-       return ret;
 }
 
 static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
                         bool compat)
 {
+       struct aio_kiocb *req;
        struct iocb iocb;
+       int err;
 
        if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb))))
                return -EFAULT;
 
-       return __io_submit_one(ctx, &iocb, user_iocb, compat);
+       /* enforce forwards compatibility on users */
+       if (unlikely(iocb.aio_reserved2)) {
+               pr_debug("EINVAL: reserve field set\n");
+               return -EINVAL;
+       }
+
+       /* prevent overflows */
+       if (unlikely(
+           (iocb.aio_buf != (unsigned long)iocb.aio_buf) ||
+           (iocb.aio_nbytes != (size_t)iocb.aio_nbytes) ||
+           ((ssize_t)iocb.aio_nbytes < 0)
+          )) {
+               pr_debug("EINVAL: overflow check\n");
+               return -EINVAL;
+       }
+
+       req = aio_get_req(ctx);
+       if (unlikely(!req))
+               return -EAGAIN;
+
+       err = __io_submit_one(ctx, &iocb, user_iocb, req, compat);
+
+       /* Done with the synchronous reference */
+       iocb_put(req);
+
+       /*
+        * If err is 0, we'd either done aio_complete() ourselves or have
+        * arranged for that to be done asynchronously.  Anything non-zero
+        * means that we need to destroy req ourselves.
+        */
+       if (unlikely(err)) {
+               iocb_destroy(req);
+               put_reqs_available(ctx, 1);
+       }
+       return err;
 }
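
With allocation, validation, and teardown hoisted into io_submit_one(), the contract of __io_submit_one() becomes: the caller holds one synchronous reference across the call and destroys the request itself on any non-zero return, while a successful submission hands the other reference to the completion path. Compressed into a sketch with hypothetical names (request, do_submit, and free_request are not the kernel's):

#include <linux/refcount.h>

struct request {
        refcount_t refs;        /* one for the submitter, one for completion */
};

static void req_put(struct request *req)
{
        if (refcount_dec_and_test(&req->refs))
                free_request(req);
}

static int submit(struct request *req)
{
        int err;

        refcount_set(&req->refs, 2);
        err = do_submit(req);   /* on success, may complete asynchronously */
        req_put(req);           /* always drop the submitter's reference */
        if (err)
                req_put(req);   /* completion will never run; drop its ref too */
        return err;
}
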
 
 /* sys_io_submit:
@@ -1997,24 +1973,6 @@ COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id,
 }
 #endif
 
-/* lookup_kiocb
- *     Finds a given iocb for cancellation.
- */
-static struct aio_kiocb *
-lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb)
-{
-       struct aio_kiocb *kiocb;
-
-       assert_spin_locked(&ctx->ctx_lock);
-
-       /* TODO: use a hash or array, this sucks. */
-       list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
-               if (kiocb->ki_user_iocb == iocb)
-                       return kiocb;
-       }
-       return NULL;
-}
-
 /* sys_io_cancel:
  *     Attempts to cancel an iocb previously passed to io_submit.  If
  *     the operation is successfully cancelled, the resulting event is
@@ -2032,6 +1990,7 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
        struct aio_kiocb *kiocb;
        int ret = -EINVAL;
        u32 key;
+       u64 obj = (u64)(unsigned long)iocb;
 
        if (unlikely(get_user(key, &iocb->aio_key)))
                return -EFAULT;
@@ -2043,10 +2002,13 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
                return -EINVAL;
 
        spin_lock_irq(&ctx->ctx_lock);
-       kiocb = lookup_kiocb(ctx, iocb);
-       if (kiocb) {
-               ret = kiocb->ki_cancel(&kiocb->rw);
-               list_del_init(&kiocb->ki_list);
+       /* TODO: use a hash or array, this sucks. */
+       list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
+               if (kiocb->ki_res.obj == obj) {
+                       ret = kiocb->ki_cancel(&kiocb->rw);
+                       list_del_init(&kiocb->ki_list);
+                       break;
+               }
        }
        spin_unlock_irq(&ctx->ctx_lock);
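
Since the cancellation lookup now matches on ki_res.obj, i.e. the user-space address of the iocb, io_cancel() must be passed exactly the pointer that was handed to io_submit(). For completeness, a hedged wrapper (glibc exposes no stub for this syscall):

#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Call with the same iocb pointer that io_submit() saw. */
static long io_cancel(aio_context_t ctx, struct iocb *cb, struct io_event *ev)
{
        return syscall(SYS_io_cancel, ctx, cb, ev);
}
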
 
index 78d3257435c00b76633ee6168a2586ccfa70118c..24615c76c1d0e20739db509d3ddde0e111994e2b 100644 (file)
@@ -307,10 +307,10 @@ static void blkdev_bio_end_io(struct bio *bio)
        struct blkdev_dio *dio = bio->bi_private;
        bool should_dirty = dio->should_dirty;
 
-       if (dio->multi_bio && !atomic_dec_and_test(&dio->ref)) {
-               if (bio->bi_status && !dio->bio.bi_status)
-                       dio->bio.bi_status = bio->bi_status;
-       } else {
+       if (bio->bi_status && !dio->bio.bi_status)
+               dio->bio.bi_status = bio->bi_status;
+
+       if (!dio->multi_bio || atomic_dec_and_test(&dio->ref)) {
                if (!dio->is_sync) {
                        struct kiocb *iocb = dio->iocb;
                        ssize_t ret;
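
The reordering above fixes a silent loss of information: previously bi_status was copied into the parent dio only for bios that were not the last to complete, so an error reported by the final bio of a multi-bio direct I/O was dropped. The first-error-wins idiom, distilled into a sketch with hypothetical types:

static void fragment_end_io(struct whole_io *io, int status)
{
        /* Record the first error before the last-completion check,
         * so an error carried by the final fragment is not lost.
         */
        if (status && !io->status)
                io->status = status;

        if (atomic_dec_and_test(&io->pending))
                complete_whole_io(io, io->status);
}
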
index ec2d8919e7fb0ee63c28bcfd241d0e8a54ed05a3..cd4e693406a0e62bda2171d7d6800c8b4ef65ab0 100644 (file)
@@ -501,6 +501,16 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
 
+       /*
+        * If the fs is mounted with nologreplay, which requires it to be
+        * mounted in RO mode as well, we cannot allow discard on free space
+        * inside block groups, because log trees refer to extents that are not
+        * pinned in a block group's free space cache (pinning the extents is
+        * precisely the first phase of replaying a log tree).
+        */
+       if (btrfs_test_opt(fs_info, NOLOGREPLAY))
+               return -EROFS;
+
        rcu_read_lock();
        list_for_each_entry_rcu(device, &fs_info->fs_devices->devices,
                                dev_list) {
index dc6140013ae8194739a8aa6a387f12c35794bdf9..61d22a56c0ba4e7d43f0552854f4ac4e82443218 100644 (file)
@@ -366,11 +366,11 @@ int btrfs_subvol_inherit_props(struct btrfs_trans_handle *trans,
 
 static int prop_compression_validate(const char *value, size_t len)
 {
-       if (!strncmp("lzo", value, len))
+       if (!strncmp("lzo", value, 3))
                return 0;
-       else if (!strncmp("zlib", value, len))
+       else if (!strncmp("zlib", value, 4))
                return 0;
-       else if (!strncmp("zstd", value, len))
+       else if (!strncmp("zstd", value, 4))
                return 0;
 
        return -EINVAL;
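
The fixed lengths matter because strncmp() compares at most n bytes: with n taken from the caller-supplied value length, a short value such as "z" passed validation by prefix-matching "zlib", and a short "zst" could even select zstd in prop_compression_apply() below. A small stand-alone demonstration:

#include <assert.h>
#include <string.h>

int main(void)
{
        /* Old check: n was the (short) user-supplied value length. */
        assert(strncmp("zstd", "z", 1) == 0);   /* bogus prefix match */

        /* New check: n is the full length of the literal. */
        assert(strncmp("zstd", "z", 4) != 0);   /* properly rejected */
        return 0;
}
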
@@ -396,7 +396,7 @@ static int prop_compression_apply(struct inode *inode,
                btrfs_set_fs_incompat(fs_info, COMPRESS_LZO);
        } else if (!strncmp("zlib", value, 4)) {
                type = BTRFS_COMPRESS_ZLIB;
-       } else if (!strncmp("zstd", value, len)) {
+       } else if (!strncmp("zstd", value, 4)) {
                type = BTRFS_COMPRESS_ZSTD;
                btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD);
        } else {
index e3346628efe2e221c844db3af6b4336d07b4f7f1..2d61ddda9bf5653fb559fb320422fd84ec470419 100644 (file)
@@ -524,6 +524,7 @@ static void ceph_i_callback(struct rcu_head *head)
        struct inode *inode = container_of(head, struct inode, i_rcu);
        struct ceph_inode_info *ci = ceph_inode(inode);
 
+       kfree(ci->i_symlink);
        kmem_cache_free(ceph_inode_cachep, ci);
 }
 
@@ -566,7 +567,6 @@ void ceph_destroy_inode(struct inode *inode)
                }
        }
 
-       kfree(ci->i_symlink);
        while ((n = rb_first(&ci->i_fragtree)) != NULL) {
                frag = rb_entry(n, struct ceph_inode_frag, node);
                rb_erase(n, &ci->i_fragtree);
index f9b71c12cc9f6d46267eaf73a801dd00715a9cf2..a05bf1d6e1d04143da40126e0a07ea927347001e 100644 (file)
@@ -559,6 +559,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
                        tcon->ses->server->echo_interval / HZ);
        if (tcon->snapshot_time)
                seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
+       if (tcon->handle_timeout)
+               seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
        /* convert actimeo and display it in seconds */
        seq_printf(s, ",actimeo=%lu", cifs_sb->actimeo / HZ);
 
index 38feae812b4704b315ee3adfe2a1eaa2c4740e45..5b18d45857409eb06624a894e2308e6728c39ba3 100644 (file)
  */
 #define CIFS_MAX_ACTIMEO (1 << 30)
 
+/*
+ * Max persistent and resilient handle timeout (milliseconds).
+ * Windows durable max was 960000 (16 minutes)
+ */
+#define SMB3_MAX_HANDLE_TIMEOUT 960000
+
 /*
  * MAX_REQ is the maximum number of requests that WE will send
  * on one socket concurrently.
@@ -586,6 +592,7 @@ struct smb_vol {
        struct nls_table *local_nls;
        unsigned int echo_interval; /* echo interval in secs */
        __u64 snapshot_time; /* needed for timewarp tokens */
+       __u32 handle_timeout; /* persistent and durable handle timeout in ms */
        unsigned int max_credits; /* smb3 max_credits 10 < credits < 60000 */
 };
 
@@ -1058,6 +1065,7 @@ struct cifs_tcon {
        __u32 vol_serial_number;
        __le64 vol_create_time;
        __u64 snapshot_time; /* for timewarp tokens - timestamp of snapshot */
+       __u32 handle_timeout; /* persistent and durable handle timeout in ms */
        __u32 ss_flags;         /* sector size flags */
        __u32 perf_sector_size; /* best sector size for perf */
        __u32 max_chunks;
index a8e9738db691294736105bf8a0cd03772a9a2447..4c0e44489f21497670131b5a889ae6f4eb0b843e 100644 (file)
@@ -103,7 +103,7 @@ enum {
        Opt_cruid, Opt_gid, Opt_file_mode,
        Opt_dirmode, Opt_port,
        Opt_blocksize, Opt_rsize, Opt_wsize, Opt_actimeo,
-       Opt_echo_interval, Opt_max_credits,
+       Opt_echo_interval, Opt_max_credits, Opt_handletimeout,
        Opt_snapshot,
 
        /* Mount options which take string value */
@@ -208,6 +208,7 @@ static const match_table_t cifs_mount_option_tokens = {
        { Opt_rsize, "rsize=%s" },
        { Opt_wsize, "wsize=%s" },
        { Opt_actimeo, "actimeo=%s" },
+       { Opt_handletimeout, "handletimeout=%s" },
        { Opt_echo_interval, "echo_interval=%s" },
        { Opt_max_credits, "max_credits=%s" },
        { Opt_snapshot, "snapshot=%s" },
@@ -1619,6 +1620,9 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
 
        vol->actimeo = CIFS_DEF_ACTIMEO;
 
+       /* Most clients set timeout to 0, which allows the server to use its default */
+       vol->handle_timeout = 0; /* See MS-SMB2 spec section 2.2.14.2.12 */
+
        /* offer SMB2.1 and later (SMB3 etc). Secure and widely accepted */
        vol->ops = &smb30_operations;
        vol->vals = &smbdefault_values;
@@ -2017,6 +2021,18 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
                                goto cifs_parse_mount_err;
                        }
                        break;
+               case Opt_handletimeout:
+                       if (get_option_ul(args, &option)) {
+                               cifs_dbg(VFS, "%s: Invalid handletimeout value\n",
+                                        __func__);
+                               goto cifs_parse_mount_err;
+                       }
+                       vol->handle_timeout = option;
+                       if (vol->handle_timeout > SMB3_MAX_HANDLE_TIMEOUT) {
+                               cifs_dbg(VFS, "Invalid handle cache timeout, longer than 16 minutes\n");
+                               goto cifs_parse_mount_err;
+                       }
+                       break;
                case Opt_echo_interval:
                        if (get_option_ul(args, &option)) {
                                cifs_dbg(VFS, "%s: Invalid echo interval value\n",
@@ -3183,6 +3199,8 @@ static int match_tcon(struct cifs_tcon *tcon, struct smb_vol *volume_info)
                return 0;
        if (tcon->snapshot_time != volume_info->snapshot_time)
                return 0;
+       if (tcon->handle_timeout != volume_info->handle_timeout)
+               return 0;
        return 1;
 }
 
@@ -3297,6 +3315,16 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
                        tcon->snapshot_time = volume_info->snapshot_time;
        }
 
+       if (volume_info->handle_timeout) {
+               if (ses->server->vals->protocol_id == 0) {
+                       cifs_dbg(VFS,
+                            "Use SMB2.1 or later for handle timeout option\n");
+                       rc = -EOPNOTSUPP;
+                       goto out_fail;
+               } else
+                       tcon->handle_timeout = volume_info->handle_timeout;
+       }
+
        tcon->ses = ses;
        if (volume_info->password) {
                tcon->password = kstrdup(volume_info->password, GFP_KERNEL);
index b204e84b87fb52d938dc138379f7877ffb2ba74a..54bffb2a1786d00c5becdb9c2c275c0aa5f87c99 100644 (file)
@@ -68,13 +68,15 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
 
 
         if (oparms->tcon->use_resilient) {
-               nr_ioctl_req.Timeout = 0; /* use server default (120 seconds) */
+               /* default timeout is 0, servers pick default (120 seconds) */
+               nr_ioctl_req.Timeout =
+                       cpu_to_le32(oparms->tcon->handle_timeout);
                nr_ioctl_req.Reserved = 0;
                rc = SMB2_ioctl(xid, oparms->tcon, fid->persistent_fid,
                        fid->volatile_fid, FSCTL_LMR_REQUEST_RESILIENCY,
                        true /* is_fsctl */,
                        (char *)&nr_ioctl_req, sizeof(nr_ioctl_req),
-                       NULL, NULL /* no return info */);
+                       CIFSMaxBufSize, NULL, NULL /* no return info */);
                if (rc == -EOPNOTSUPP) {
                        cifs_dbg(VFS,
                             "resiliency not supported by server, disabling\n");
index 1022a3771e140d819e767ba5a75677a88d65f911..00225e699d036c079441d53ee896e7abcdda1149 100644 (file)
@@ -581,7 +581,7 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
        rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
                        FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */,
                        NULL /* no data input */, 0 /* no data input */,
-                       (char **)&out_buf, &ret_data_len);
+                       CIFSMaxBufSize, (char **)&out_buf, &ret_data_len);
        if (rc == -EOPNOTSUPP) {
                cifs_dbg(FYI,
                         "server does not support query network interfaces\n");
@@ -717,32 +717,28 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
        oparms.fid->mid = le64_to_cpu(o_rsp->sync_hdr.MessageId);
 #endif /* CIFS_DEBUG2 */
 
-       if (o_rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE)
-               oplock = smb2_parse_lease_state(server, o_rsp,
-                                               &oparms.fid->epoch,
-                                               oparms.fid->lease_key);
-       else
-               goto oshr_exit;
-
-
        memcpy(tcon->crfid.fid, pfid, sizeof(struct cifs_fid));
        tcon->crfid.tcon = tcon;
        tcon->crfid.is_valid = true;
        kref_init(&tcon->crfid.refcount);
-       kref_get(&tcon->crfid.refcount);
 
+       if (o_rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE) {
+               kref_get(&tcon->crfid.refcount);
+               oplock = smb2_parse_lease_state(server, o_rsp,
+                                               &oparms.fid->epoch,
+                                               oparms.fid->lease_key);
+       } else
+               goto oshr_exit;
 
        qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
        if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info))
                goto oshr_exit;
-       rc = smb2_validate_and_copy_iov(
+       if (!smb2_validate_and_copy_iov(
                                le16_to_cpu(qi_rsp->OutputBufferOffset),
                                sizeof(struct smb2_file_all_info),
                                &rsp_iov[1], sizeof(struct smb2_file_all_info),
-                               (char *)&tcon->crfid.file_all_info);
-       if (rc)
-               goto oshr_exit;
-       tcon->crfid.file_all_info_is_valid = 1;
+                               (char *)&tcon->crfid.file_all_info))
+               tcon->crfid.file_all_info_is_valid = 1;
 
  oshr_exit:
        mutex_unlock(&tcon->crfid.fid_mutex);
@@ -1299,7 +1295,7 @@ SMB2_request_res_key(const unsigned int xid, struct cifs_tcon *tcon,
 
        rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
                        FSCTL_SRV_REQUEST_RESUME_KEY, true /* is_fsctl */,
-                       NULL, 0 /* no input */,
+                       NULL, 0 /* no input */, CIFSMaxBufSize,
                        (char **)&res_key, &ret_data_len);
 
        if (rc) {
@@ -1404,7 +1400,7 @@ smb2_ioctl_query_info(const unsigned int xid,
                        rc = SMB2_ioctl_init(tcon, &rqst[1],
                                             COMPOUND_FID, COMPOUND_FID,
                                             qi.info_type, true, NULL,
-                                            0);
+                                            0, CIFSMaxBufSize);
                }
        } else if (qi.flags == PASSTHRU_QUERY_INFO) {
                memset(&qi_iov, 0, sizeof(qi_iov));
@@ -1532,8 +1528,8 @@ smb2_copychunk_range(const unsigned int xid,
                rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
                        trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
                        true /* is_fsctl */, (char *)pcchunk,
-                       sizeof(struct copychunk_ioctl), (char **)&retbuf,
-                       &ret_data_len);
+                       sizeof(struct copychunk_ioctl), CIFSMaxBufSize,
+                       (char **)&retbuf, &ret_data_len);
                if (rc == 0) {
                        if (ret_data_len !=
                                        sizeof(struct copychunk_ioctl_rsp)) {
@@ -1693,7 +1689,7 @@ static bool smb2_set_sparse(const unsigned int xid, struct cifs_tcon *tcon,
        rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
                        cfile->fid.volatile_fid, FSCTL_SET_SPARSE,
                        true /* is_fctl */,
-                       &setsparse, 1, NULL, NULL);
+                       &setsparse, 1, CIFSMaxBufSize, NULL, NULL);
        if (rc) {
                tcon->broken_sparse_sup = true;
                cifs_dbg(FYI, "set sparse rc = %d\n", rc);
@@ -1766,7 +1762,7 @@ smb2_duplicate_extents(const unsigned int xid,
                        true /* is_fsctl */,
                        (char *)&dup_ext_buf,
                        sizeof(struct duplicate_extents_to_file),
-                       NULL,
+                       CIFSMaxBufSize, NULL,
                        &ret_data_len);
 
        if (ret_data_len > 0)
@@ -1801,7 +1797,7 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
                        true /* is_fsctl */,
                        (char *)&integr_info,
                        sizeof(struct fsctl_set_integrity_information_req),
-                       NULL,
+                       CIFSMaxBufSize, NULL,
                        &ret_data_len);
 
 }
@@ -1809,6 +1805,8 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
 /* GMT Token is @GMT-YYYY.MM.DD-HH.MM.SS Unicode which is 48 bytes + null */
 #define GMT_TOKEN_SIZE 50
 
+#define MIN_SNAPSHOT_ARRAY_SIZE 16 /* See MS-SMB2 section 3.3.5.15.1 */
+
 /*
  * Input buffer contains (empty) struct smb_snapshot array with size filled in
  * For output see struct SRV_SNAPSHOT_ARRAY in MS-SMB2 section 2.2.32.2
@@ -1820,13 +1818,29 @@ smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
        char *retbuf = NULL;
        unsigned int ret_data_len = 0;
        int rc;
+       u32 max_response_size;
        struct smb_snapshot_array snapshot_in;
 
+       if (get_user(ret_data_len, (unsigned int __user *)ioc_buf))
+               return -EFAULT;
+
+       /*
+        * Note that for snapshot queries, servers like Azure expect the first
+        * query to be of minimal size (and just used to get the number/size
+        * of previous versions), so the response size must be specified as
+        * EXACTLY sizeof(struct snapshot_array), which is 16 when rounded up
+        * to a multiple of eight bytes.
+        */
+       if (ret_data_len == 0)
+               max_response_size = MIN_SNAPSHOT_ARRAY_SIZE;
+       else
+               max_response_size = CIFSMaxBufSize;
+
        rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
                        cfile->fid.volatile_fid,
                        FSCTL_SRV_ENUMERATE_SNAPSHOTS,
                        true /* is_fsctl */,
-                       NULL, 0 /* no input data */,
+                       NULL, 0 /* no input data */, max_response_size,
                        (char **)&retbuf,
                        &ret_data_len);
        cifs_dbg(FYI, "enum snapshots ioctl returned %d and ret buflen is %d\n",
@@ -2304,7 +2318,7 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
                rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
                                FSCTL_DFS_GET_REFERRALS,
                                true /* is_fsctl */,
-                               (char *)dfs_req, dfs_req_size,
+                               (char *)dfs_req, dfs_req_size, CIFSMaxBufSize,
                                (char **)&dfs_rsp, &dfs_rsp_size);
        } while (rc == -EAGAIN);
 
@@ -2658,7 +2672,8 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
        rc = SMB2_ioctl_init(tcon, &rqst[num++], cfile->fid.persistent_fid,
                             cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
                             true /* is_fctl */, (char *)&fsctl_buf,
-                            sizeof(struct file_zero_data_information));
+                            sizeof(struct file_zero_data_information),
+                            CIFSMaxBufSize);
        if (rc)
                goto zero_range_exit;
 
@@ -2735,7 +2750,8 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
        rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
                        cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
                        true /* is_fctl */, (char *)&fsctl_buf,
-                       sizeof(struct file_zero_data_information), NULL, NULL);
+                       sizeof(struct file_zero_data_information),
+                       CIFSMaxBufSize, NULL, NULL);
        free_xid(xid);
        return rc;
 }
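
The smb3_enum_snapshots() change above implements a two-phase query: the first ioctl passes a zero-sized snapshot array purely to learn how much space the list needs (hence the EXACTLY-16-byte response), and only the follow-up call gets a full-size response buffer. The user-side shape of that pattern, sketched with abridged names (snap_hdr and ENUM_SNAPSHOTS stand in for the real CIFS user API):

struct snap_hdr hdr = { .array_size = 0 };
char *buf;

ioctl(fd, ENUM_SNAPSHOTS, &hdr);        /* phase 1: learn the needed size */
buf = calloc(1, hdr.array_size);        /* phase 2: fetch the actual data */
memcpy(buf, &hdr, sizeof(hdr));
ioctl(fd, ENUM_SNAPSHOTS, buf);
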
index 21ac19ff19cb2c3257f524f4aef90f0de2e8d342..21ad01d55ab2d32f0a4406c4cd0704cfab80026a 100644 (file)
@@ -1002,7 +1002,8 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
 
        rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
                FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */,
-               (char *)pneg_inbuf, inbuflen, (char **)&pneg_rsp, &rsplen);
+               (char *)pneg_inbuf, inbuflen, CIFSMaxBufSize,
+               (char **)&pneg_rsp, &rsplen);
        if (rc == -EOPNOTSUPP) {
                /*
                 * Old Windows versions or Netapp SMB server can return
@@ -1858,8 +1859,9 @@ add_lease_context(struct TCP_Server_Info *server, struct kvec *iov,
 }
 
 static struct create_durable_v2 *
-create_durable_v2_buf(struct cifs_fid *pfid)
+create_durable_v2_buf(struct cifs_open_parms *oparms)
 {
+       struct cifs_fid *pfid = oparms->fid;
        struct create_durable_v2 *buf;
 
        buf = kzalloc(sizeof(struct create_durable_v2), GFP_KERNEL);
@@ -1873,7 +1875,14 @@ create_durable_v2_buf(struct cifs_fid *pfid)
                                (struct create_durable_v2, Name));
        buf->ccontext.NameLength = cpu_to_le16(4);
 
-       buf->dcontext.Timeout = 0; /* Should this be configurable by workload */
+       /*
+        * NB: Handle timeout defaults to 0, which lets the server choose
+        * (most servers default to 120 seconds); most clients also default
+        * to 0. This can be overridden at mount ("handletimeout=") if the
+        * user wants a different persistent (or resilient) handle timeout
+        * for all opens on a particular SMB3 mount.
+        */
+       buf->dcontext.Timeout = cpu_to_le32(oparms->tcon->handle_timeout);
        buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
        generate_random_uuid(buf->dcontext.CreateGuid);
        memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16);
@@ -1926,7 +1935,7 @@ add_durable_v2_context(struct kvec *iov, unsigned int *num_iovec,
        struct smb2_create_req *req = iov[0].iov_base;
        unsigned int num = *num_iovec;
 
-       iov[num].iov_base = create_durable_v2_buf(oparms->fid);
+       iov[num].iov_base = create_durable_v2_buf(oparms);
        if (iov[num].iov_base == NULL)
                return -ENOMEM;
        iov[num].iov_len = sizeof(struct create_durable_v2);
@@ -2478,7 +2487,8 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
 int
 SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
                u64 persistent_fid, u64 volatile_fid, u32 opcode,
-               bool is_fsctl, char *in_data, u32 indatalen)
+               bool is_fsctl, char *in_data, u32 indatalen,
+               __u32 max_response_size)
 {
        struct smb2_ioctl_req *req;
        struct kvec *iov = rqst->rq_iov;
@@ -2520,16 +2530,21 @@ SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
        req->OutputCount = 0; /* MBZ */
 
        /*
-        * Could increase MaxOutputResponse, but that would require more
-        * than one credit. Windows typically sets this smaller, but for some
+        * In most cases max_response_size is set to 16K (CIFSMaxBufSize).
+        * We could increase the default MaxOutputResponse, but that could
+        * require more credits. Windows typically sets this smaller, but for some
         * ioctls it may be useful to allow server to send more. No point
         * limiting what the server can send as long as fits in one credit
-        * Unfortunately - we can not handle more than CIFS_MAX_MSG_SIZE
-        * (by default, note that it can be overridden to make max larger)
-        * in responses (except for read responses which can be bigger.
-        * We may want to bump this limit up
+        * We cannot handle more than CIFS_MAX_BUF_SIZE yet, but may want
+        * to raise this limit in the future.
+        * Note that for snapshot queries, servers like Azure expect the first
+        * query to be of minimal size (and just used to get the number/size
+        * of previous versions), so the response size must be specified as
+        * EXACTLY sizeof(struct snapshot_array), which is 16 when rounded up
+        * to a multiple of eight bytes.  Currently that is the only case where
+        * we set the max response size smaller.
         */
-       req->MaxOutputResponse = cpu_to_le32(CIFSMaxBufSize);
+       req->MaxOutputResponse = cpu_to_le32(max_response_size);
 
        if (is_fsctl)
                req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL);
@@ -2550,13 +2565,14 @@ SMB2_ioctl_free(struct smb_rqst *rqst)
                cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
 }
 
+
 /*
  *     SMB2 IOCTL is used for both IOCTLs and FSCTLs
  */
 int
 SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
           u64 volatile_fid, u32 opcode, bool is_fsctl,
-          char *in_data, u32 indatalen,
+          char *in_data, u32 indatalen, u32 max_out_data_len,
           char **out_data, u32 *plen /* returned data len */)
 {
        struct smb_rqst rqst;
@@ -2593,8 +2609,8 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
        rqst.rq_iov = iov;
        rqst.rq_nvec = SMB2_IOCTL_IOV_SIZE;
 
-       rc = SMB2_ioctl_init(tcon, &rqst, persistent_fid, volatile_fid,
-                            opcode, is_fsctl, in_data, indatalen);
+       rc = SMB2_ioctl_init(tcon, &rqst, persistent_fid, volatile_fid, opcode,
+                            is_fsctl, in_data, indatalen, max_out_data_len);
        if (rc)
                goto ioctl_exit;
 
@@ -2672,7 +2688,8 @@ SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
        rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
                        FSCTL_SET_COMPRESSION, true /* is_fsctl */,
                        (char *)&fsctl_input /* data input */,
-                       2 /* in data len */, &ret_data /* out data */, NULL);
+                       2 /* in data len */, CIFSMaxBufSize /* max out data */,
+                       &ret_data /* out data */, NULL);
 
        cifs_dbg(FYI, "set compression rc %d\n", rc);
 
index 3c32d0cfea69b0c7191336e5b38247de578ed63b..52df125e918984139b176a5f4101ae6da11c7e0f 100644 (file)
@@ -142,11 +142,12 @@ extern int SMB2_open_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
 extern void SMB2_open_free(struct smb_rqst *rqst);
 extern int SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon,
                     u64 persistent_fid, u64 volatile_fid, u32 opcode,
-                    bool is_fsctl, char *in_data, u32 indatalen,
+                    bool is_fsctl, char *in_data, u32 indatalen, u32 maxoutlen,
                     char **out_data, u32 *plen /* returned data len */);
 extern int SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
                           u64 persistent_fid, u64 volatile_fid, u32 opcode,
-                          bool is_fsctl, char *in_data, u32 indatalen);
+                          bool is_fsctl, char *in_data, u32 indatalen,
+                          __u32 max_response_size);
 extern void SMB2_ioctl_free(struct smb_rqst *rqst);
 extern int SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
                      u64 persistent_file_id, u64 volatile_file_id);
index ca0671d55aa699df6723ffb897706b6579c68780..e5e54da1715f630cf1471e625e72045b6f31e112 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -33,6 +33,7 @@
 #include <linux/sizes.h>
 #include <linux/mmu_notifier.h>
 #include <linux/iomap.h>
+#include <asm/pgalloc.h>
 #include "internal.h"
 
 #define CREATE_TRACE_POINTS
@@ -1407,7 +1408,9 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
 {
        struct address_space *mapping = vmf->vma->vm_file->f_mapping;
        unsigned long pmd_addr = vmf->address & PMD_MASK;
+       struct vm_area_struct *vma = vmf->vma;
        struct inode *inode = mapping->host;
+       pgtable_t pgtable = NULL;
        struct page *zero_page;
        spinlock_t *ptl;
        pmd_t pmd_entry;
@@ -1422,12 +1425,22 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
        *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
                        DAX_PMD | DAX_ZERO_PAGE, false);
 
+       if (arch_needs_pgtable_deposit()) {
+               pgtable = pte_alloc_one(vma->vm_mm);
+               if (!pgtable)
+                       return VM_FAULT_OOM;
+       }
+
        ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
        if (!pmd_none(*(vmf->pmd))) {
                spin_unlock(ptl);
                goto fallback;
        }
 
+       if (pgtable) {
+               pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
+               mm_inc_nr_ptes(vma->vm_mm);
+       }
        pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
        pmd_entry = pmd_mkhuge(pmd_entry);
        set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
@@ -1436,6 +1449,8 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
        return VM_FAULT_NOPAGE;
 
 fallback:
+       if (pgtable)
+               pte_free(vma->vm_mm, pgtable);
        trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
        return VM_FAULT_FALLBACK;
 }
index 95b5e78c22b1e98811d3aca9c64c2c5deb54c6fe..f25daa207421c50cf38b1e4771ef5ab3332e9be1 100644 (file)
@@ -163,19 +163,24 @@ static int debugfs_show_options(struct seq_file *m, struct dentry *root)
        return 0;
 }
 
-static void debugfs_evict_inode(struct inode *inode)
+static void debugfs_i_callback(struct rcu_head *head)
 {
-       truncate_inode_pages_final(&inode->i_data);
-       clear_inode(inode);
+       struct inode *inode = container_of(head, struct inode, i_rcu);
        if (S_ISLNK(inode->i_mode))
                kfree(inode->i_link);
+       free_inode_nonrcu(inode);
+}
+
+static void debugfs_destroy_inode(struct inode *inode)
+{
+       call_rcu(&inode->i_rcu, debugfs_i_callback);
 }
 
 static const struct super_operations debugfs_super_operations = {
        .statfs         = simple_statfs,
        .remount_fs     = debugfs_remount,
        .show_options   = debugfs_show_options,
-       .evict_inode    = debugfs_evict_inode,
+       .destroy_inode  = debugfs_destroy_inode,
 };
 
 static void debugfs_release_dentry(struct dentry *dentry)
index 842e8f749db64eb6ee17297e1039bb7e2c4ea2b7..570d71043acf982976d3098cf5e4beaee7532241 100644 (file)
@@ -410,7 +410,7 @@ bool fs_validate_description(const struct fs_parameter_description *desc)
                        for (param = desc->specs; param->name; param++) {
                                if (param->opt == e->opt &&
                                    param->type != fs_param_is_enum) {
-                                       pr_err("VALIDATE %s: e[%lu] enum val for %s\n",
+                                       pr_err("VALIDATE %s: e[%tu] enum val for %s\n",
                                               name, e - desc->enums, param->name);
                                        good = false;
                                }
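
The one-character format change is a type-correctness fix: e - desc->enums is pointer subtraction, which yields ptrdiff_t, and %tu is the printk length modifier for that type; %lu only happened to work on architectures where ptrdiff_t and unsigned long share a width. In miniature:

ptrdiff_t idx = e - desc->enums;        /* pointer difference, not unsigned long */
pr_err("e[%tu]\n", idx);                /* %t is the ptrdiff_t length modifier */
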
index 8a63e52785e978a6792542d96c66cb202e5f1d1b..9971a35cf1ef66c960862ef550197814052b9267 100644 (file)
@@ -2056,10 +2056,8 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
                rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;
 
        ret = -EINVAL;
-       if (rem < len) {
-               pipe_unlock(pipe);
-               goto out;
-       }
+       if (rem < len)
+               goto out_free;
 
        rem = len;
        while (rem) {
@@ -2077,7 +2075,9 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
                        pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
                        pipe->nrbufs--;
                } else {
-                       pipe_buf_get(pipe, ibuf);
+                       if (!pipe_buf_get(pipe, ibuf))
+                               goto out_free;
+
                        *obuf = *ibuf;
                        obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
                        obuf->len = rem;
@@ -2100,11 +2100,11 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
        ret = fuse_dev_do_write(fud, &cs, len);
 
        pipe_lock(pipe);
+out_free:
        for (idx = 0; idx < nbuf; idx++)
                pipe_buf_release(pipe, &bufs[idx]);
        pipe_unlock(pipe);
 
-out:
        kvfree(bufs);
        return ret;
 }
index ec32fece5e1e9d80e726b7e8202cf214673d7a5a..9285dd4f4b1ce3ed2601daa62cb30de6d0eed92b 100644 (file)
@@ -755,11 +755,17 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
                                        umode_t mode, dev_t dev)
 {
        struct inode *inode;
-       struct resv_map *resv_map;
+       struct resv_map *resv_map = NULL;
 
-       resv_map = resv_map_alloc();
-       if (!resv_map)
-               return NULL;
+       /*
+        * Reserve maps are only needed for inodes that can have associated
+        * page allocations.
+        */
+       if (S_ISREG(mode) || S_ISLNK(mode)) {
+               resv_map = resv_map_alloc();
+               if (!resv_map)
+                       return NULL;
+       }
 
        inode = new_inode(sb);
        if (inode) {
@@ -794,8 +800,10 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
                        break;
                }
                lockdep_annotate_inode_mutex_key(inode);
-       } else
-               kref_put(&resv_map->refs, resv_map_release);
+       } else {
+               if (resv_map)
+                       kref_put(&resv_map->refs, resv_map_release);
+       }
 
        return inode;
 }
index 6aaa30580a2b2057fca13404a44e0b3773450288..89aa8412b5f5972466e79c711cee475d8d42ebfe 100644 (file)
@@ -1022,6 +1022,8 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
 
        ret = rw_verify_area(WRITE, file, &kiocb->ki_pos, iov_count);
        if (!ret) {
+               ssize_t ret2;
+
                /*
                 * Open-code file_start_write here to grab freeze protection,
                 * which will be released by another thread in
@@ -1036,7 +1038,19 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
                                                SB_FREEZE_WRITE);
                }
                kiocb->ki_flags |= IOCB_WRITE;
-               io_rw_done(kiocb, call_write_iter(file, kiocb, &iter));
+
+               ret2 = call_write_iter(file, kiocb, &iter);
+               if (!force_nonblock || ret2 != -EAGAIN) {
+                       io_rw_done(kiocb, ret2);
+               } else {
+                       /*
+                        * If ->needs_lock is true, we're already in async
+                        * context.
+                        */
+                       if (!s->needs_lock)
+                               io_async_list_note(WRITE, req, iov_count);
+                       ret = -EAGAIN;
+               }
        }
 out_free:
        kfree(iovec);
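
The pattern introduced here: try the write non-blocking first, and if the lower layer answers -EAGAIN while we are still in submission context, record the request on the async list and let a worker retry, instead of completing the request with -EAGAIN. On the caller's side (compressed from my reading of the 5.1 submission path; not part of this hunk) that -EAGAIN never reaches user space:

ret = __io_submit_sqe(ctx, req, s, true /* force_nonblock */, state);
if (ret == -EAGAIN) {
        /* reroute the sqe to a workqueue for a blocking retry */
        memcpy(&req->submit, s, sizeof(*s));
        INIT_WORK(&req->work, io_sq_wq_submit_work);
        queue_work(ctx->sqo_wq, &req->work);
        ret = 0;
}
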
@@ -1968,7 +1982,15 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
                return 0;
 
        if (sig) {
-               ret = set_user_sigmask(sig, &ksigmask, &sigsaved, sigsz);
+#ifdef CONFIG_COMPAT
+               if (in_compat_syscall())
+                       ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
+                                                     &ksigmask, &sigsaved, sigsz);
+               else
+#endif
+                       ret = set_user_sigmask(sig, &ksigmask,
+                                              &sigsaved, sigsz);
+
                if (ret)
                        return ret;
        }
@@ -2193,6 +2215,7 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
                        fput(ctx->user_files[i]);
 
                kfree(ctx->user_files);
+               ctx->user_files = NULL;
                ctx->nr_user_files = 0;
                return ret;
        }
@@ -2222,6 +2245,10 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx,
                goto err;
 
        if (ctx->flags & IORING_SETUP_SQPOLL) {
+               ret = -EPERM;
+               if (!capable(CAP_SYS_ADMIN))
+                       goto err;
+
                if (p->flags & IORING_SETUP_SQ_AFF) {
                        int cpu;
 
index 389ea53ea487538061ff3b6da78e27df2f894ac1..bccfc40b3a74ab002e45a07149afbe09634d8f64 100644 (file)
@@ -1414,11 +1414,6 @@ void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f)
 
        jffs2_kill_fragtree(&f->fragtree, deleted?c:NULL);
 
-       if (f->target) {
-               kfree(f->target);
-               f->target = NULL;
-       }
-
        fds = f->dents;
        while(fds) {
                fd = fds;
index bb6ae387469f4d020424bfb13333e24d84e68123..05d892c79339f97276c81337c26929d8f53b7db2 100644 (file)
@@ -47,7 +47,10 @@ static struct inode *jffs2_alloc_inode(struct super_block *sb)
 static void jffs2_i_callback(struct rcu_head *head)
 {
        struct inode *inode = container_of(head, struct inode, i_rcu);
-       kmem_cache_free(jffs2_inode_cachep, JFFS2_INODE_INFO(inode));
+       struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
+
+       kfree(f->target);
+       kmem_cache_free(jffs2_inode_cachep, f);
 }
 
 static void jffs2_destroy_inode(struct inode *inode)
index ff6f85fb676b7c1094878b269d35f2f127ca5fb5..5196bfa7894d21c0eb1220f4d9bb9e51e2593afb 100644 (file)
@@ -329,9 +329,6 @@ ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src,
        };
        ssize_t err, err2;
 
-       if (!nfs_server_capable(file_inode(dst), NFS_CAP_COPY))
-               return -EOPNOTSUPP;
-
        src_lock = nfs_get_lock_context(nfs_file_open_context(src));
        if (IS_ERR(src_lock))
                return PTR_ERR(src_lock);
index 45b2322e092d2455b508a8fdc00f5bde0b73c4e9..00d17198ee12aa7f6177bd3c1c5830fa655d1033 100644 (file)
@@ -133,8 +133,10 @@ static ssize_t nfs4_copy_file_range(struct file *file_in, loff_t pos_in,
                                    struct file *file_out, loff_t pos_out,
                                    size_t count, unsigned int flags)
 {
+       if (!nfs_server_capable(file_inode(file_out), NFS_CAP_COPY))
+               return -EOPNOTSUPP;
        if (file_inode(file_in) == file_inode(file_out))
-               return -EINVAL;
+               return -EOPNOTSUPP;
        return nfs42_proc_copy(file_in, pos_in, file_out, pos_out, count);
 }
 
index cfcabc33e24d01136ba00c336f90497f657fb0a5..602446158bfb56e1fe62b74411d276431baef8c6 100644 (file)
@@ -2589,7 +2589,7 @@ static void nfs4_xdr_enc_getacl(struct rpc_rqst *req, struct xdr_stream *xdr,
                        ARRAY_SIZE(nfs4_acl_bitmap), &hdr);
 
        rpc_prepare_reply_pages(req, args->acl_pages, 0,
-                               args->acl_len, replen);
+                               args->acl_len, replen + 1);
        encode_nops(&hdr);
 }
 
@@ -2811,7 +2811,7 @@ static void nfs4_xdr_enc_fs_locations(struct rpc_rqst *req,
        }
 
        rpc_prepare_reply_pages(req, (struct page **)&args->page, 0,
-                               PAGE_SIZE, replen);
+                               PAGE_SIZE, replen + 1);
        encode_nops(&hdr);
 }
 
index 23790c7b2289d21328db2a824eef5c6484e29089..c27ac96a95bd3535bc893493492fdc7681aba1fe 100644 (file)
@@ -2041,7 +2041,8 @@ static int nfs23_validate_mount_data(void *options,
                memcpy(sap, &data->addr, sizeof(data->addr));
                args->nfs_server.addrlen = sizeof(data->addr);
                args->nfs_server.port = ntohs(data->addr.sin_port);
-               if (!nfs_verify_server_address(sap))
+               if (sap->sa_family != AF_INET ||
+                   !nfs_verify_server_address(sap))
                        goto out_no_address;
 
                if (!(data->flags & NFS_MOUNT_TCP))
index a35259eebc56739b59bf7ffb5029e647dc11ad0f..1dc9a08e8bdc7b7c96a5668072faddf75b0285c2 100644 (file)
@@ -4719,22 +4719,23 @@ loff_t ocfs2_reflink_remap_blocks(struct inode *s_inode,
 
 /* Lock an inode and grab a bh pointing to the inode. */
 int ocfs2_reflink_inodes_lock(struct inode *s_inode,
-                             struct buffer_head **bh1,
+                             struct buffer_head **bh_s,
                              struct inode *t_inode,
-                             struct buffer_head **bh2)
+                             struct buffer_head **bh_t)
 {
-       struct inode *inode1;
-       struct inode *inode2;
+       struct inode *inode1 = s_inode;
+       struct inode *inode2 = t_inode;
        struct ocfs2_inode_info *oi1;
        struct ocfs2_inode_info *oi2;
+       struct buffer_head *bh1 = NULL;
+       struct buffer_head *bh2 = NULL;
        bool same_inode = (s_inode == t_inode);
+       bool need_swap = (inode1->i_ino > inode2->i_ino);
        int status;
 
        /* First grab the VFS and rw locks. */
        lock_two_nondirectories(s_inode, t_inode);
-       inode1 = s_inode;
-       inode2 = t_inode;
-       if (inode1->i_ino > inode2->i_ino)
+       if (need_swap)
                swap(inode1, inode2);
 
        status = ocfs2_rw_lock(inode1, 1);
@@ -4757,17 +4758,13 @@ int ocfs2_reflink_inodes_lock(struct inode *s_inode,
        trace_ocfs2_double_lock((unsigned long long)oi1->ip_blkno,
                                (unsigned long long)oi2->ip_blkno);
 
-       if (*bh1)
-               *bh1 = NULL;
-       if (*bh2)
-               *bh2 = NULL;
-
        /* We always want to lock the one with the lower lockid first. */
        if (oi1->ip_blkno > oi2->ip_blkno)
                mlog_errno(-ENOLCK);
 
        /* lock id1 */
-       status = ocfs2_inode_lock_nested(inode1, bh1, 1, OI_LS_REFLINK_TARGET);
+       status = ocfs2_inode_lock_nested(inode1, &bh1, 1,
+                                        OI_LS_REFLINK_TARGET);
        if (status < 0) {
                if (status != -ENOENT)
                        mlog_errno(status);
@@ -4776,15 +4773,25 @@ int ocfs2_reflink_inodes_lock(struct inode *s_inode,
 
        /* lock id2 */
        if (!same_inode) {
-               status = ocfs2_inode_lock_nested(inode2, bh2, 1,
+               status = ocfs2_inode_lock_nested(inode2, &bh2, 1,
                                                 OI_LS_REFLINK_TARGET);
                if (status < 0) {
                        if (status != -ENOENT)
                                mlog_errno(status);
                        goto out_cl1;
                }
-       } else
-               *bh2 = *bh1;
+       } else {
+               bh2 = bh1;
+       }
+
+       /*
+        * If we swapped inode order above, we have to swap the buffer heads
+        * before passing them back to the caller.
+        */
+       if (need_swap)
+               swap(bh1, bh2);
+       *bh_s = bh1;
+       *bh_t = bh2;
 
        trace_ocfs2_double_lock_end(
                        (unsigned long long)oi1->ip_blkno,
@@ -4794,8 +4801,7 @@ int ocfs2_reflink_inodes_lock(struct inode *s_inode,
 
 out_cl1:
        ocfs2_inode_unlock(inode1, 1);
-       brelse(*bh1);
-       *bh1 = NULL;
+       brelse(bh1);
 out_rw2:
        ocfs2_rw_unlock(inode2, 1);
 out_i2:
index 0285ce7dbd515c8c7bfd9e63f0211cabfb818801..a00350018a4792e758d5749e6294902cf32f2174 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -733,6 +733,12 @@ static int do_dentry_open(struct file *f,
                return 0;
        }
 
+       /* Any file opened for execve()/uselib() has to be a regular file. */
+       if (unlikely(f->f_flags & FMODE_EXEC && !S_ISREG(inode->i_mode))) {
+               error = -EACCES;
+               goto cleanup_file;
+       }
+
        if (f->f_mode & FMODE_WRITE && !special_file(inode->i_mode)) {
                error = get_write_access(inode);
                if (unlikely(error))
@@ -1209,3 +1215,21 @@ int nonseekable_open(struct inode *inode, struct file *filp)
 }
 
 EXPORT_SYMBOL(nonseekable_open);
+
+/*
+ * stream_open is used by subsystems that want stream-like file descriptors.
+ * Such file descriptors are not seekable and don't have a notion of position
+ * (file.f_pos is always 0). Unlike file descriptors of other regular files,
+ * .read() and .write() can run simultaneously.
+ *
+ * stream_open never fails and is marked to return int so that it can be
+ * used directly as file_operations.open.
+ */
+int stream_open(struct inode *inode, struct file *filp)
+{
+       filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE | FMODE_ATOMIC_POS);
+       filp->f_mode |= FMODE_STREAM;
+       return 0;
+}
+
+EXPORT_SYMBOL(stream_open);
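
Together with the read_write.c hunk further down, which makes file_pos_read()/file_pos_write() ignore f_pos for FMODE_STREAM files, this gives drivers a one-line opt-out from position semantics. A hedged usage sketch (my_read and my_write are placeholders):

static const struct file_operations my_stream_fops = {
        .owner  = THIS_MODULE,
        .open   = stream_open,  /* never fails; returns int for exactly this use */
        .read   = my_read,      /* may now run concurrently with my_write */
        .write  = my_write,
};
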
index 070aad543382a4e30aa0bd5eef94b5dabba7219d..41065901106b09d4365ebc13ee6cfa7b6465339b 100644 (file)
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -188,9 +188,9 @@ EXPORT_SYMBOL(generic_pipe_buf_steal);
  *     in the tee() system call, when we duplicate the buffers in one
  *     pipe into another.
  */
-void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
+bool generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
 {
-       get_page(buf->page);
+       return try_get_page(buf->page);
 }
 EXPORT_SYMBOL(generic_pipe_buf_get);
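
Returning bool here is the visible edge of the page refcount overflow hardening in this merge: try_get_page() refuses to take a reference rather than letting page->_refcount wrap, and every pipe_buf_get() caller (see the fuse and splice hunks) must now handle failure. Conceptually, and only as a sketch, the acquiring side behaves like:

static inline bool try_get_page_like(struct page *page)
{
        if (page_ref_count(page) <= 0)  /* exact predicate lives in try_get_page() */
                return false;
        get_page(page);
        return true;
}
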
 
index ddef482f133406737e09e5df4966aea9b6ec06aa..6a803a0b75df45af049fd655a7e5a729f436708c 100644 (file)
@@ -616,24 +616,25 @@ static int proc_pid_limits(struct seq_file *m, struct pid_namespace *ns,
 static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
                            struct pid *pid, struct task_struct *task)
 {
-       long nr;
-       unsigned long args[6], sp, pc;
+       struct syscall_info info;
+       u64 *args = &info.data.args[0];
        int res;
 
        res = lock_trace(task);
        if (res)
                return res;
 
-       if (task_current_syscall(task, &nr, args, 6, &sp, &pc))
+       if (task_current_syscall(task, &info))
                seq_puts(m, "running\n");
-       else if (nr < 0)
-               seq_printf(m, "%ld 0x%lx 0x%lx\n", nr, sp, pc);
+       else if (info.data.nr < 0)
+               seq_printf(m, "%d 0x%llx 0x%llx\n",
+                          info.data.nr, info.sp, info.data.instruction_pointer);
        else
                seq_printf(m,
-                      "%ld 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n",
-                      nr,
+                      "%d 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx\n",
+                      info.data.nr,
                       args[0], args[1], args[2], args[3], args[4], args[5],
-                      sp, pc);
+                      info.sp, info.data.instruction_pointer);
        unlock_trace(task);
 
        return 0;
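
The info.* fields come from the reworked task_current_syscall(), which now takes a single struct instead of five output pointers. Per the rest of this series (declared in <linux/ptrace.h>; reproduced here only because this excerpt shows just the consumer):

struct syscall_info {
        __u64                   sp;
        struct seccomp_data     data;   /* nr, arch, instruction_pointer, args[6] */
};
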
index d29d869abec17c3b6d4b56d6500e9b3d510cd385..f5834488b67d564d91b5534c210fc7fcd8e95969 100644 (file)
@@ -615,7 +615,7 @@ static void __init proc_kcore_text_init(void)
 /*
  * MODULES_VADDR has no intersection with VMALLOC_ADDR.
  */
-struct kcore_list kcore_modules;
+static struct kcore_list kcore_modules;
 static void __init add_modules_range(void)
 {
        if (MODULES_VADDR != VMALLOC_START && MODULES_END != VMALLOC_END) {
index 4d598a399bbff1b32becb1cf24406f9b9e80287c..d653907275419435e4bad20de2f1a704b5c9d6c3 100644 (file)
@@ -1626,7 +1626,8 @@ static void drop_sysctl_table(struct ctl_table_header *header)
        if (--header->nreg)
                return;
 
-       put_links(header);
+       if (parent)
+               put_links(header);
        start_unregistering(header);
        if (!--header->count)
                kfree_rcu(header, rcu);
index 177ccc3d405a33b425998e400b105418d8f537a4..61b43ad7608e301336662d7cab1cf6a7bda8067c 100644 (file)
@@ -560,12 +560,13 @@ ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_
 
 static inline loff_t file_pos_read(struct file *file)
 {
-       return file->f_pos;
+       return file->f_mode & FMODE_STREAM ? 0 : file->f_pos;
 }
 
 static inline void file_pos_write(struct file *file, loff_t pos)
 {
-       file->f_pos = pos;
+       if ((file->f_mode & FMODE_STREAM) == 0)
+               file->f_pos = pos;
 }
 
 ssize_t ksys_read(unsigned int fd, char __user *buf, size_t count)
index 3ee7e82df48f2b14d09ddd3b4877879e90f8c418..98943d9b219c0cea1037770cbc6578970fbe69b6 100644 (file)
@@ -1593,7 +1593,11 @@ static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
                         * Get a reference to this pipe buffer,
                         * so we can copy the contents over.
                         */
-                       pipe_buf_get(ipipe, ibuf);
+                       if (!pipe_buf_get(ipipe, ibuf)) {
+                               if (ret == 0)
+                                       ret = -EFAULT;
+                               break;
+                       }
                        *obuf = *ibuf;
 
                        /*
@@ -1667,7 +1671,11 @@ static int link_pipe(struct pipe_inode_info *ipipe,
                 * Get a reference to this pipe buffer,
                 * so we can copy the contents over.
                 */
-               pipe_buf_get(ipipe, ibuf);
+               if (!pipe_buf_get(ipipe, ibuf)) {
+                       if (ret == 0)
+                               ret = -EFAULT;
+                       break;
+               }
 
                obuf = opipe->bufs + nbuf;
                *obuf = *ibuf;
index 8dc2818fdd84990b74e07a8d479de994b8e5be9b..12628184772c04b27c975568101a4b5cddf442f8 100644 (file)
@@ -276,14 +276,12 @@ static void ubifs_i_callback(struct rcu_head *head)
 {
        struct inode *inode = container_of(head, struct inode, i_rcu);
        struct ubifs_inode *ui = ubifs_inode(inode);
+       kfree(ui->data);
        kmem_cache_free(ubifs_inode_slab, ui);
 }
 
 static void ubifs_destroy_inode(struct inode *inode)
 {
-       struct ubifs_inode *ui = ubifs_inode(inode);
-
-       kfree(ui->data);
        call_rcu(&inode->i_rcu, ubifs_i_callback);
 }
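
The ubifs hunk above is the fourth instance of one idiom in this merge, after ceph, debugfs, and jffs2: per-inode payload (symlink targets, ui->data) must be freed from the RCU callback rather than from ->destroy_inode or ->evict_inode, because RCU-walk path lookup may still dereference the inode until the grace period ends. The common shape, with hypothetical foo_* names:

static void foo_i_callback(struct rcu_head *head)
{
        struct inode *inode = container_of(head, struct inode, i_rcu);

        kfree(FOO_I(inode)->payload);   /* safe: the grace period has elapsed */
        kmem_cache_free(foo_inode_cachep, FOO_I(inode));
}

static void foo_destroy_inode(struct inode *inode)
{
        call_rcu(&inode->i_rcu, foo_i_callback);
}
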
 
index 48502cb9990f184a55b780372adaef3bda406509..4637ae1ae91ca8ef6007c05ba060dd9fb208fdf1 100644 (file)
@@ -1191,7 +1191,10 @@ xfs_iread_extents(
         * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
         */
        level = be16_to_cpu(block->bb_level);
-       ASSERT(level > 0);
+       if (unlikely(level == 0)) {
+               XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
+               return -EFSCORRUPTED;
+       }
        pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
        bno = be64_to_cpu(*pp);
 
@@ -4249,9 +4252,13 @@ xfs_bmapi_write(
        struct xfs_bmbt_irec    *mval,          /* output: map values */
        int                     *nmap)          /* i/o: mval size/count */
 {
+       struct xfs_bmalloca     bma = {
+               .tp             = tp,
+               .ip             = ip,
+               .total          = total,
+       };
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_ifork        *ifp;
-       struct xfs_bmalloca     bma = { NULL }; /* args for xfs_bmap_alloc */
        xfs_fileoff_t           end;            /* end of mapped file region */
        bool                    eof = false;    /* after the end of extents */
        int                     error;          /* error return */
@@ -4319,10 +4326,6 @@ xfs_bmapi_write(
                eof = true;
        if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
                bma.prev.br_startoff = NULLFILEOFF;
-       bma.tp = tp;
-       bma.ip = ip;
-       bma.total = total;
-       bma.datatype = 0;
        bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);
 
        n = 0;
index 6f94d1f7322d0a33bd00134c8136ac3831b4fe2c..117910db51b809ebeea0196182e05f0dd0c54611 100644 (file)
@@ -415,8 +415,17 @@ xchk_btree_check_owner(
        struct xfs_btree_cur    *cur = bs->cur;
        struct check_owner      *co;
 
-       if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) && bp == NULL)
+       /*
+        * In theory, xfs_btree_get_block should only give us a null buffer
+        * pointer for the root of a root-in-inode btree type, but we need
+        * to check defensively here in case the cursor state is also screwed
+        * up.
+        */
+       if (bp == NULL) {
+               if (!(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE))
+                       xchk_btree_set_corrupt(bs->sc, bs->cur, level);
                return 0;
+       }
 
        /*
         * We want to cross-reference each btree block with the bnobt
index f1260b4bfdeed62440cc238138e7fd405c2dccf1..90527b094878971f831c78daafe2483dd99e83d2 100644 (file)
@@ -574,6 +574,11 @@ xchk_da_btree(
                /* Drill another level deeper. */
                blkno = be32_to_cpu(key->before);
                level++;
+               if (level >= XFS_DA_NODE_MAXDEPTH) {
+                       /* Too deep! */
+                       xchk_da_set_corrupt(&ds, level - 1);
+                       break;
+               }
                ds.tree_level--;
                error = xchk_da_btree_block(&ds, level, blkno);
                if (error)
index 93f07edafd8183a14ca55fae7ffdfad0370ccc89..9ee2a7d02e7059f29c103da1088b7c854ca023bb 100644 (file)
@@ -161,6 +161,14 @@ xfs_ioc_trim(
                return -EPERM;
        if (!blk_queue_discard(q))
                return -EOPNOTSUPP;
+
+       /*
+        * We haven't recovered the log, so we cannot use our bnobt-guided
+        * storage zapping commands.
+        */
+       if (mp->m_flags & XFS_MOUNT_NORECOVERY)
+               return -EROFS;
+
        if (copy_from_user(&range, urange, sizeof(range)))
                return -EFAULT;
 
index 1f2e2845eb76c2c78a932c913057e1028cec2f05..a7ceae90110eded646f13acf4314131574d46a69 100644 (file)
@@ -529,18 +529,17 @@ xfs_file_dio_aio_write(
        count = iov_iter_count(from);
 
        /*
-        * If we are doing unaligned IO, wait for all other IO to drain,
-        * otherwise demote the lock if we had to take the exclusive lock
-        * for other reasons in xfs_file_aio_write_checks.
+        * If we are doing unaligned IO, we can't allow any other overlapping IO
+        * in-flight at the same time or we risk data corruption. Wait for all
+        * other IO to drain before we submit. If the IO is aligned, demote the
+        * iolock if we had to take the exclusive lock in
+        * xfs_file_aio_write_checks() for other reasons.
         */
        if (unaligned_io) {
-               /* If we are going to wait for other DIO to finish, bail */
-               if (iocb->ki_flags & IOCB_NOWAIT) {
-                       if (atomic_read(&inode->i_dio_count))
-                               return -EAGAIN;
-               } else {
-                       inode_dio_wait(inode);
-               }
+               /* unaligned dio always waits, bail */
+               if (iocb->ki_flags & IOCB_NOWAIT)
+                       return -EAGAIN;
+               inode_dio_wait(inode);
        } else if (iolock == XFS_IOLOCK_EXCL) {
                xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
                iolock = XFS_IOLOCK_SHARED;
@@ -548,6 +547,14 @@ xfs_file_dio_aio_write(
 
        trace_xfs_file_direct_write(ip, count, iocb->ki_pos);
        ret = iomap_dio_rw(iocb, from, &xfs_iomap_ops, xfs_dio_write_end_io);
+
+       /*
+        * If unaligned, this is the only IO in-flight. If it has not yet
+        * completed, wait on it before we release the iolock to prevent
+        * subsequent overlapping IO.
+        */
+       if (ret == -EIOCBQUEUED && unaligned_io)
+               inode_dio_wait(inode);
 out:
        xfs_iunlock(ip, iolock);
 
index 30b1ae53689fcffcb6a7a0b20437745c6cebd43c..c50542dc71e0b24912571eaa09039a98d3a6ef9f 100644 (file)
 
 /* Defaults for debug_level, debug and normal */
 
+#ifndef ACPI_DEBUG_DEFAULT
 #define ACPI_DEBUG_DEFAULT          (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT | ACPI_LV_EVALUATION | ACPI_LV_REPAIR)
+#endif
+
 #define ACPI_NORMAL_DEFAULT         (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT | ACPI_LV_REPAIR)
 #define ACPI_DEBUG_ALL              (ACPI_LV_AML_DISASSEMBLE | ACPI_LV_ALL_EXCEPTIONS | ACPI_LV_ALL)
 
index 9ff328fd946a2b8ef332b6d13bdfc07ce403f5e4..624b90b340852c65104d8de894fb7d9a5a555f53 100644 (file)
 #define ACPI_NO_ERROR_MESSAGES
 #undef ACPI_DEBUG_OUTPUT
 
+/* Use a specific debugging default separate from ACPICA */

+
+#undef ACPI_DEBUG_DEFAULT
+#define ACPI_DEBUG_DEFAULT          (ACPI_LV_INFO | ACPI_LV_REPAIR)
+
 /* External interface for __KERNEL__, stub is needed */
 
 #define ACPI_EXTERNAL_RETURN_STATUS(prototype) \
index 0c938a4354f6f58f67e0bb3555dca24460afadb7..b88239e9efe49979a5c94a3c411f8f36ba09e541 100644 (file)
@@ -105,41 +105,30 @@ void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
  * syscall_get_arguments - extract system call parameter values
  * @task:      task of interest, must be blocked
  * @regs:      task_pt_regs() of @task
- * @i:         argument index [0,5]
- * @n:         number of arguments; n+i must be [1,6].
  * @args:      array filled with argument values
  *
- * Fetches @n arguments to the system call starting with the @i'th argument
- * (from 0 through 5).  Argument @i is stored in @args[0], and so on.
- * An arch inline version is probably optimal when @i and @n are constants.
+ * Fetches 6 arguments to the system call.  The first argument is stored in
+ * @args[0], and so on.
  *
  * It's only valid to call this when @task is stopped for tracing on
  * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
- * It's invalid to call this with @i + @n > 6; we only support system calls
- * taking up to 6 arguments.
  */
 void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
-                          unsigned int i, unsigned int n, unsigned long *args);
+                          unsigned long *args);
 
 /**
  * syscall_set_arguments - change system call parameter value
  * @task:      task of interest, must be in system call entry tracing
  * @regs:      task_pt_regs() of @task
- * @i:         argument index [0,5]
- * @n:         number of arguments; n+i must be [1,6].
  * @args:      array of argument values to store
  *
- * Changes @n arguments to the system call starting with the @i'th argument.
- * Argument @i gets value @args[0], and so on.
- * An arch inline version is probably optimal when @i and @n are constants.
+ * Changes 6 arguments to the system call.
+ * The first argument gets value @args[0], and so on.
  *
  * It's only valid to call this when @task is stopped for tracing on
  * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
- * It's invalid to call this with @i + @n > 6; we only support system calls
- * taking up to 6 arguments.
  */
 void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
-                          unsigned int i, unsigned int n,
                           const unsigned long *args);
 
 /**
index cfb7be40bed7a55a453757b30bf20772590b718d..ce4de6b1e444a855d04cc9afe5c7ebca5833a1a6 100644 (file)
@@ -418,6 +418,8 @@ struct drm_crtc_helper_funcs {
         * Drivers can use the @old_crtc_state input parameter if the operations
         * needed to enable the CRTC don't depend solely on the new state but
         * also on the transition between the old state and the new state.
+        *
+        * This function is optional.
         */
        void (*atomic_enable)(struct drm_crtc *crtc,
                              struct drm_crtc_state *old_crtc_state);
@@ -441,6 +443,8 @@ struct drm_crtc_helper_funcs {
         * parameter @old_crtc_state which could be used to access the old
         * state. Atomic drivers should consider to use this one instead
         * of @disable.
+        *
+        * This function is optional.
         */
        void (*atomic_disable)(struct drm_crtc *crtc,
                               struct drm_crtc_state *old_crtc_state);
diff --git a/include/dt-bindings/clock/sifive-fu540-prci.h b/include/dt-bindings/clock/sifive-fu540-prci.h
new file mode 100644 (file)
index 0000000..6a0b70a
--- /dev/null
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018-2019 SiFive, Inc.
+ * Wesley Terpstra
+ * Paul Walmsley
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_SIFIVE_FU540_PRCI_H
+#define __DT_BINDINGS_CLOCK_SIFIVE_FU540_PRCI_H
+
+/* Clock indexes for use by Device Tree data and the PRCI driver */
+
+#define PRCI_CLK_COREPLL              0
+#define PRCI_CLK_DDRPLL                       1
+#define PRCI_CLK_GEMGXLPLL            2
+#define PRCI_CLK_TLCLK                3
+
+#endif
index 8063e8314eefbfbf75181622465a33f2fbe48ba8..6d487c5eba2cae612e58ef72b8712d8d97aa71b5 100644 (file)
 #define RESET_SD_EMMC_A                        44
 #define RESET_SD_EMMC_B                        45
 #define RESET_SD_EMMC_C                        46
-/*                                     47-60 */
+/*                                     47      */
+#define RESET_USB_PHY20                        48
+#define RESET_USB_PHY21                        49
+/*                                     50-60   */
 #define RESET_AUDIO_CODEC              61
 /*                                     62-63   */
 /*     RESET2                                  */
index adbcb681782604b5356a0b1db56a9a9af6177529..0071298b9b28eb41a313e7f29eb609bd84eb4785 100644 (file)
@@ -38,7 +38,7 @@ enum {
 
 int TSS_authhmac(unsigned char *digest, const unsigned char *key,
                        unsigned int keylen, unsigned char *h1,
-                       unsigned char *h2, unsigned char h3, ...);
+                       unsigned char *h2, unsigned int h3, ...);
 int TSS_checkhmac1(unsigned char *buffer,
                          const uint32_t command,
                          const unsigned char *ononce,
index bb6090aa165d362ae399194fdca8d58b7cb8f5bf..e584673c18814295e2c901ab16d69511bc5e7a37 100644 (file)
@@ -120,19 +120,23 @@ static inline bool bio_full(struct bio *bio)
        return bio->bi_vcnt >= bio->bi_max_vecs;
 }
 
-#define mp_bvec_for_each_segment(bv, bvl, i, iter_all)                 \
-       for (bv = bvec_init_iter_all(&iter_all);                        \
-               (iter_all.done < (bvl)->bv_len) &&                      \
-               (mp_bvec_next_segment((bvl), &iter_all), 1);            \
-               iter_all.done += bv->bv_len, i += 1)
+static inline bool bio_next_segment(const struct bio *bio,
+                                   struct bvec_iter_all *iter)
+{
+       if (iter->idx >= bio->bi_vcnt)
+               return false;
+
+       bvec_advance(&bio->bi_io_vec[iter->idx], iter);
+       return true;
+}
 
 /*
  * drivers should _never_ use the all version - the bio may have been split
  * before it got to the driver and the driver won't own all of it
  */
-#define bio_for_each_segment_all(bvl, bio, i, iter_all)                \
-       for (i = 0, iter_all.idx = 0; iter_all.idx < (bio)->bi_vcnt; iter_all.idx++)    \
-               mp_bvec_for_each_segment(bvl, &((bio)->bi_io_vec[iter_all.idx]), i, iter_all)
+#define bio_for_each_segment_all(bvl, bio, i, iter)                    \
+       for (i = 0, bvl = bvec_init_iter_all(&iter);                    \
+            bio_next_segment((bio), &iter); i++)
 
 static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
                                    unsigned bytes)
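
The old two-level macro nest becomes a single loop: bio_next_segment() reports whether a segment remains, and bvec_advance() bumps iter->idx internally once a vector is consumed. A userspace mock of the flattened shape (struct vec, struct iter_all and next_segment() are illustrative simplifications without the per-page splitting the real bvec_advance() performs):

    #include <stdbool.h>
    #include <stdio.h>

    struct vec { int len; };
    struct iter_all { int idx; };

    /* One call per segment; returns false once every vector is consumed. */
    static bool next_segment(const struct vec *vecs, int nvecs,
                             struct iter_all *it, int *seg_len)
    {
        if (it->idx >= nvecs)
            return false;
        *seg_len = vecs[it->idx++].len;
        return true;
    }

    int main(void)
    {
        struct vec vecs[] = { { 512 }, { 4096 }, { 1024 } };
        struct iter_all it = { 0 };
        int len, i;

        for (i = 0; next_segment(vecs, 3, &it, &len); i++)
            printf("segment %d: %d bytes\n", i, len);
        return 0;
    }
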
index 50fb0dee23e8662120461cd227cf11f548939339..d35b8ec1c485cba58a658b34edd2f9621cd20639 100644 (file)
@@ -34,41 +34,41 @@ static inline u32 __bitrev32(u32 x)
 
 #define __constant_bitrev32(x) \
 ({                                     \
-       u32 __x = x;                    \
-       __x = (__x >> 16) | (__x << 16);        \
-       __x = ((__x & (u32)0xFF00FF00UL) >> 8) | ((__x & (u32)0x00FF00FFUL) << 8);      \
-       __x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4);      \
-       __x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2);      \
-       __x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1);      \
-       __x;                                                            \
+       u32 ___x = x;                   \
+       ___x = (___x >> 16) | (___x << 16);     \
+       ___x = ((___x & (u32)0xFF00FF00UL) >> 8) | ((___x & (u32)0x00FF00FFUL) << 8);   \
+       ___x = ((___x & (u32)0xF0F0F0F0UL) >> 4) | ((___x & (u32)0x0F0F0F0FUL) << 4);   \
+       ___x = ((___x & (u32)0xCCCCCCCCUL) >> 2) | ((___x & (u32)0x33333333UL) << 2);   \
+       ___x = ((___x & (u32)0xAAAAAAAAUL) >> 1) | ((___x & (u32)0x55555555UL) << 1);   \
+       ___x;                                                           \
 })
 
 #define __constant_bitrev16(x) \
 ({                                     \
-       u16 __x = x;                    \
-       __x = (__x >> 8) | (__x << 8);  \
-       __x = ((__x & (u16)0xF0F0U) >> 4) | ((__x & (u16)0x0F0FU) << 4);        \
-       __x = ((__x & (u16)0xCCCCU) >> 2) | ((__x & (u16)0x3333U) << 2);        \
-       __x = ((__x & (u16)0xAAAAU) >> 1) | ((__x & (u16)0x5555U) << 1);        \
-       __x;                                                            \
+       u16 ___x = x;                   \
+       ___x = (___x >> 8) | (___x << 8);       \
+       ___x = ((___x & (u16)0xF0F0U) >> 4) | ((___x & (u16)0x0F0FU) << 4);     \
+       ___x = ((___x & (u16)0xCCCCU) >> 2) | ((___x & (u16)0x3333U) << 2);     \
+       ___x = ((___x & (u16)0xAAAAU) >> 1) | ((___x & (u16)0x5555U) << 1);     \
+       ___x;                                                           \
 })
 
 #define __constant_bitrev8x4(x) \
 ({                     \
-       u32 __x = x;    \
-       __x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4);      \
-       __x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2);      \
-       __x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1);      \
-       __x;                                                            \
+       u32 ___x = x;   \
+       ___x = ((___x & (u32)0xF0F0F0F0UL) >> 4) | ((___x & (u32)0x0F0F0F0FUL) << 4);   \
+       ___x = ((___x & (u32)0xCCCCCCCCUL) >> 2) | ((___x & (u32)0x33333333UL) << 2);   \
+       ___x = ((___x & (u32)0xAAAAAAAAUL) >> 1) | ((___x & (u32)0x55555555UL) << 1);   \
+       ___x;                                                           \
 })
 
 #define __constant_bitrev8(x)  \
 ({                                     \
-       u8 __x = x;                     \
-       __x = (__x >> 4) | (__x << 4);  \
-       __x = ((__x & (u8)0xCCU) >> 2) | ((__x & (u8)0x33U) << 2);      \
-       __x = ((__x & (u8)0xAAU) >> 1) | ((__x & (u8)0x55U) << 1);      \
-       __x;                                                            \
+       u8 ___x = x;                    \
+       ___x = (___x >> 4) | (___x << 4);       \
+       ___x = ((___x & (u8)0xCCU) >> 2) | ((___x & (u8)0x33U) << 2);   \
+       ___x = ((___x & (u8)0xAAU) >> 1) | ((___x & (u8)0x55U) << 1);   \
+       ___x;                                                           \
 })
 
 #define bitrev32(x) \
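
The rename from __x to ___x is not cosmetic: inside a GCC statement expression, an argument that itself mentions a variable named __x (as happens when such macros nest) is captured by the macro's own temporary, which then reads its own indeterminate value. A compilable illustration of the fixed pattern (DOUBLE() is a toy macro invented for the demonstration):

    #include <stdio.h>

    /* GCC statement expression using the collision-proof temporary name. */
    #define DOUBLE(x) ({ unsigned ___x = (x); ___x * 2; })

    int main(void)
    {
        unsigned __x = 21;

        /*
         * With the old name the expansion would have been
         *     ({ unsigned __x = (__x + 0); __x * 2; })
         * where the inner __x shadows ours and reads garbage.
         */
        printf("%u\n", DOUBLE(__x + 0));    /* prints 42 */
        return 0;
    }
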
index cb2aa7ecafff5cb772772db11d5ee06314f60359..db29928de46741b0887e6bd13ed577a59684d180 100644 (file)
@@ -302,6 +302,7 @@ void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
 void blk_mq_kick_requeue_list(struct request_queue *q);
 void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
 bool blk_mq_complete_request(struct request *rq);
+void blk_mq_complete_request_sync(struct request *rq);
 bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
                           struct bio *bio);
 bool blk_mq_queue_stopped(struct request_queue *q);
index f628971988449a94898d8da72ffa3743d7a3d694..e4d4c1771ab0e2602079d0a40ae36397a146cab0 100644 (file)
@@ -57,6 +57,12 @@ struct bpf_map_ops {
                             const struct btf *btf,
                             const struct btf_type *key_type,
                             const struct btf_type *value_type);
+
+       /* Direct value access helpers. */
+       int (*map_direct_value_addr)(const struct bpf_map *map,
+                                    u64 *imm, u32 off);
+       int (*map_direct_value_meta)(const struct bpf_map *map,
+                                    u64 imm, u32 *off);
 };
 
 struct bpf_map {
@@ -81,7 +87,8 @@ struct bpf_map {
        struct btf *btf;
        u32 pages;
        bool unpriv_array;
-       /* 51 bytes hole */
+       bool frozen; /* write-once */
+       /* 48 bytes hole */
 
        /* The 3rd and 4th cacheline with misc members to avoid false sharing
         * particularly with refcounting.
@@ -421,8 +428,38 @@ struct bpf_array {
        };
 };
 
+#define BPF_COMPLEXITY_LIMIT_INSNS      1000000 /* yes. 1M insns */
 #define MAX_TAIL_CALL_CNT 32
 
+#define BPF_F_ACCESS_MASK      (BPF_F_RDONLY |         \
+                                BPF_F_RDONLY_PROG |    \
+                                BPF_F_WRONLY |         \
+                                BPF_F_WRONLY_PROG)
+
+#define BPF_MAP_CAN_READ       BIT(0)
+#define BPF_MAP_CAN_WRITE      BIT(1)
+
+static inline u32 bpf_map_flags_to_cap(struct bpf_map *map)
+{
+       u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
+
+       /* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is
+        * not possible.
+        */
+       if (access_flags & BPF_F_RDONLY_PROG)
+               return BPF_MAP_CAN_READ;
+       else if (access_flags & BPF_F_WRONLY_PROG)
+               return BPF_MAP_CAN_WRITE;
+       else
+               return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE;
+}
+
+static inline bool bpf_map_flags_access_ok(u32 access_flags)
+{
+       return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) !=
+              (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
+}
+
 struct bpf_event_entry {
        struct perf_event *event;
        struct file *perf_file;
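
bpf_map_flags_access_ok() encodes a single rule: BPF_F_RDONLY_PROG and BPF_F_WRONLY_PROG may each appear alone or not at all, never together; bpf_map_flags_to_cap() then maps the three valid combinations to read/write capabilities. The same bit logic restated in userspace (the flag values below are local placeholders, not the uapi constants):

    #include <assert.h>
    #include <stdbool.h>

    #define F_RDONLY_PROG 0x1u   /* placeholder values */
    #define F_WRONLY_PROG 0x2u

    static bool flags_access_ok(unsigned int flags)
    {
        return (flags & (F_RDONLY_PROG | F_WRONLY_PROG)) !=
               (F_RDONLY_PROG | F_WRONLY_PROG);
    }

    int main(void)
    {
        assert(flags_access_ok(0));                     /* read + write        */
        assert(flags_access_ok(F_RDONLY_PROG));         /* read-only for progs */
        assert(flags_access_ok(F_WRONLY_PROG));         /* write-only for progs*/
        assert(!flags_access_ok(F_RDONLY_PROG | F_WRONLY_PROG)); /* rejected  */
        return 0;
    }
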
@@ -446,14 +483,6 @@ typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type,
 u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
                     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);
 
-int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
-                         union bpf_attr __user *uattr);
-int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
-                         union bpf_attr __user *uattr);
-int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
-                                    const union bpf_attr *kattr,
-                                    union bpf_attr __user *uattr);
-
 /* an array of programs to be executed under rcu_lock.
  *
  * Typical usage:
@@ -644,6 +673,13 @@ static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
 struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
 int array_map_alloc_check(union bpf_attr *attr);
 
+int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
+                         union bpf_attr __user *uattr);
+int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
+                         union bpf_attr __user *uattr);
+int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
+                                    const union bpf_attr *kattr,
+                                    union bpf_attr __user *uattr);
 #else /* !CONFIG_BPF_SYSCALL */
 static inline struct bpf_prog *bpf_prog_get(u32 ufd)
 {
@@ -755,6 +791,27 @@ static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
 {
        return ERR_PTR(-EOPNOTSUPP);
 }
+
+static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog,
+                                       const union bpf_attr *kattr,
+                                       union bpf_attr __user *uattr)
+{
+       return -ENOTSUPP;
+}
+
+static inline int bpf_prog_test_run_skb(struct bpf_prog *prog,
+                                       const union bpf_attr *kattr,
+                                       union bpf_attr __user *uattr)
+{
+       return -ENOTSUPP;
+}
+
+static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
+                                                  const union bpf_attr *kattr,
+                                                  union bpf_attr __user *uattr)
+{
+       return -ENOTSUPP;
+}
 #endif /* CONFIG_BPF_SYSCALL */
 
 static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
index 7d8228d1c8981d9b73fb72a8953687c1f550eb19..b3ab61fe193290422348e50d045dbc6e31cd53c1 100644 (file)
@@ -207,6 +207,7 @@ struct bpf_verifier_state {
 struct bpf_verifier_state_list {
        struct bpf_verifier_state state;
        struct bpf_verifier_state_list *next;
+       int miss_cnt, hit_cnt;
 };
 
 /* Possible states for alu_state member. */
@@ -223,6 +224,10 @@ struct bpf_insn_aux_data {
                unsigned long map_state;        /* pointer/poison value for maps */
                s32 call_imm;                   /* saved imm field of call insn */
                u32 alu_limit;                  /* limit for add/sub register with pointer */
+               struct {
+                       u32 map_index;          /* index into used_maps[] */
+                       u32 map_off;            /* offset from value base address */
+               };
        };
        int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
        int sanitize_stack_off; /* stack slot to be cleared */
@@ -248,6 +253,12 @@ static inline bool bpf_verifier_log_full(const struct bpf_verifier_log *log)
        return log->len_used >= log->len_total - 1;
 }
 
+#define BPF_LOG_LEVEL1 1
+#define BPF_LOG_LEVEL2 2
+#define BPF_LOG_STATS  4
+#define BPF_LOG_LEVEL  (BPF_LOG_LEVEL1 | BPF_LOG_LEVEL2)
+#define BPF_LOG_MASK   (BPF_LOG_LEVEL | BPF_LOG_STATS)
+
 static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
 {
        return log->level && log->ubuf && !bpf_verifier_log_full(log);
@@ -274,6 +285,7 @@ struct bpf_verifier_env {
        bool strict_alignment;          /* perform strict pointer alignment checks */
        struct bpf_verifier_state *cur_state; /* current verifier state */
        struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
+       struct bpf_verifier_state_list *free_list;
        struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
        u32 used_map_cnt;               /* number of used maps */
        u32 id_gen;                     /* used to generate unique reg IDs */
@@ -284,6 +296,21 @@ struct bpf_verifier_env {
        struct bpf_verifier_log log;
        struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1];
        u32 subprog_cnt;
+       /* number of instructions analyzed by the verifier */
+       u32 insn_processed;
+       /* total verification time */
+       u64 verification_time;
+       /* maximum number of verifier states kept in 'branching' instructions */
+       u32 max_states_per_insn;
+       /* total number of allocated verifier states */
+       u32 total_states;
+       /* Some states are freed during program analysis.
+        * This is the peak number of states, which dominates kernel
+        * memory consumption during verification.
+        */
+       u32 peak_states;
+       /* longest register parentage chain walked for liveness marking */
+       u32 longest_mark_read_walk;
 };
 
 __printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
index 455d31b55828d2240f3e0f27920be11b75dfee57..64cdf2a23d427cf2af3f2fd264d32aeffd6c582b 100644 (file)
@@ -51,6 +51,7 @@ bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
                           const struct btf_member *m,
                           u32 expected_offset, u32 expected_size);
 int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t);
+bool btf_type_is_void(const struct btf_type *t);
 
 #ifdef CONFIG_BPF_SYSCALL
 const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id);
index f6275c4da13a765fd60f3d34a9ca491d1d56db93..3bc91879e1e2b09ced375d4540e97094ae593f66 100644 (file)
@@ -145,18 +145,18 @@ static inline bool bvec_iter_advance(const struct bio_vec *bv,
 
 static inline struct bio_vec *bvec_init_iter_all(struct bvec_iter_all *iter_all)
 {
-       iter_all->bv.bv_page = NULL;
        iter_all->done = 0;
+       iter_all->idx = 0;
 
        return &iter_all->bv;
 }
 
-static inline void mp_bvec_next_segment(const struct bio_vec *bvec,
-                                       struct bvec_iter_all *iter_all)
+static inline void bvec_advance(const struct bio_vec *bvec,
+                               struct bvec_iter_all *iter_all)
 {
        struct bio_vec *bv = &iter_all->bv;
 
-       if (bv->bv_page) {
+       if (iter_all->done) {
                bv->bv_page = nth_page(bv->bv_page, 1);
                bv->bv_offset = 0;
        } else {
@@ -165,6 +165,12 @@ static inline void mp_bvec_next_segment(const struct bio_vec *bvec,
        }
        bv->bv_len = min_t(unsigned int, PAGE_SIZE - bv->bv_offset,
                           bvec->bv_len - iter_all->done);
+       iter_all->done += bv->bv_len;
+
+       if (iter_all->done == bvec->bv_len) {
+               iter_all->idx++;
+               iter_all->done = 0;
+       }
 }
 
 /*
index b425a7ee04ce4d2a56f4ed951ae4c7f6779818fd..4e6987e11f688bc12001cc4d030e8a3a99faa514 100644 (file)
@@ -49,8 +49,6 @@ struct bus_attribute {
        ssize_t (*store)(struct bus_type *bus, const char *buf, size_t count);
 };
 
-#define BUS_ATTR(_name, _mode, _show, _store)  \
-       struct bus_attribute bus_attr_##_name = __ATTR(_name, _mode, _show, _store)
 #define BUS_ATTR_RW(_name) \
        struct bus_attribute bus_attr_##_name = __ATTR_RW(_name)
 #define BUS_ATTR_RO(_name) \
index 8b42df09b04c9c222e3fb863daf60bef0b116a7f..dd28e7679089128a75d5ed86f5f6f435422d77eb 100644 (file)
@@ -158,6 +158,9 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
 #define FMODE_OPENED           ((__force fmode_t)0x80000)
 #define FMODE_CREATED          ((__force fmode_t)0x100000)
 
+/* File is stream-like */
+#define FMODE_STREAM           ((__force fmode_t)0x200000)
+
 /* File was opened by fanotify and shouldn't generate fanotify events */
 #define FMODE_NONOTIFY         ((__force fmode_t)0x4000000)
 
@@ -3074,6 +3077,7 @@ extern loff_t no_seek_end_llseek_size(struct file *, loff_t, int, loff_t);
 extern loff_t no_seek_end_llseek(struct file *, loff_t, int);
 extern int generic_file_open(struct inode * inode, struct file * filp);
 extern int nonseekable_open(struct inode * inode, struct file * filp);
+extern int stream_open(struct inode * inode, struct file * filp);
 
 #ifdef CONFIG_BLOCK
 typedef void (dio_submit_t)(struct bio *bio, struct inode *inode,
index ea35263eb76b76e796f2f3ffaa3378585b98db82..11943b60f2084cb5e69c6dd5553f2adf0ea4f8ca 100644 (file)
@@ -203,7 +203,6 @@ static inline void hugetlb_show_meminfo(void)
 #define pud_huge(x)    0
 #define is_hugepage_only_range(mm, addr, len)  0
 #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
-#define hugetlb_fault(mm, vma, addr, flags)    ({ BUG(); 0; })
 #define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
                                src_addr, pagep)        ({ BUG(); 0; })
 #define huge_pte_offset(mm, address, sz)       0
@@ -234,6 +233,13 @@ static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
 {
        BUG();
 }
+static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
+                               struct vm_area_struct *vma, unsigned long address,
+                               unsigned int flags)
+{
+       BUG();
+       return 0;
+}
 
 #endif /* !CONFIG_HUGETLB_PAGE */
 /*
index 627b788ba0ff8a31e8e602f1f0b4f2ee117082bd..ef0819ced0fc77cd180c3efa716e9a630f3ffd3d 100644 (file)
@@ -56,9 +56,6 @@ struct br_ip_list {
 
 extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
 
-typedef int br_should_route_hook_t(struct sk_buff *skb);
-extern br_should_route_hook_t __rcu *br_should_route_hook;
-
 #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING)
 int br_multicast_list_adjacent(struct net_device *dev,
                               struct list_head *br_ip_list);
index fa928242567db30769839ac8738be5dc58e372ab..1b6d31da7cbc3dd885e43a0c430b8cac890b13dd 100644 (file)
@@ -297,6 +297,7 @@ static inline u64 jiffies_to_nsecs(const unsigned long j)
 }
 
 extern u64 jiffies64_to_nsecs(u64 j);
+extern u64 jiffies64_to_msecs(u64 j);
 
 extern unsigned long __msecs_to_jiffies(const unsigned int m);
 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
index c843f4a9c512588edc333075cdc3856dc9a582d3..da676cdbd7277e32feb96a56063167afc0964215 100644 (file)
@@ -38,12 +38,6 @@ struct vmcoredd_node {
 
 #ifdef CONFIG_PROC_KCORE
 void __init kclist_add(struct kcore_list *, void *, size_t, int type);
-static inline
-void kclist_add_remap(struct kcore_list *m, void *addr, void *vaddr, size_t sz)
-{
-       m->vaddr = (unsigned long)vaddr;
-       kclist_add(m, addr, sz, KCORE_REMAP);
-}
 
 extern int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn));
 #else
@@ -51,11 +45,6 @@ static inline
 void kclist_add(struct kcore_list *new, void *addr, size_t size, int type)
 {
 }
-
-static inline
-void kclist_add_remap(struct kcore_list *m, void *addr, void *vaddr, size_t sz)
-{
-}
 #endif
 
 #endif /* _LINUX_KCORE_H */
index 34a5036debd341935a100b6fe4a7083db6262d5e..2d14e21c16c0b412535d00b3a537bf3353366d4a 100644 (file)
@@ -47,8 +47,8 @@
 
 #define u64_to_user_ptr(x) (           \
 {                                      \
-       typecheck(u64, x);              \
-       (void __user *)(uintptr_t)x;    \
+       typecheck(u64, (x));            \
+       (void __user *)(uintptr_t)(x);  \
 }                                      \
 )
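
The added parentheses around x are ordinary macro hygiene: a cast binds tighter than a shift or a conditional, so an unparenthesized compound argument gets silently re-grouped. A minimal illustration with an invented cast macro:

    #include <assert.h>
    #include <stdint.h>

    #define AS_U8_BAD(x)  ((uint8_t)x)     /* old style: no parens around x */
    #define AS_U8_GOOD(x) ((uint8_t)(x))

    int main(void)
    {
        /* BAD expands to ((uint8_t)0x1ff >> 1): the cast happens first. */
        assert(AS_U8_BAD(0x1ff >> 1)  == 0x7f);
        /* GOOD shifts first, as the caller intended. */
        assert(AS_U8_GOOD(0x1ff >> 1) == 0xff);
        return 0;
    }
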
 
index 9d55c63db09b5dcb9ac997d802cb00ff356d4353..640a03642766bb4ae02c86e3606318c80adaf81d 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/irqbypass.h>
 #include <linux/swait.h>
 #include <linux/refcount.h>
+#include <linux/nospec.h>
 #include <asm/signal.h>
 
 #include <linux/kvm.h>
@@ -513,10 +514,10 @@ static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
 
 static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
 {
-       /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu, in case
-        * the caller has read kvm->online_vcpus before (as is the case
-        * for kvm_for_each_vcpu, for example).
-        */
+       int num_vcpus = atomic_read(&kvm->online_vcpus);
+       i = array_index_nospec(i, num_vcpus);
+
+       /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu.  */
        smp_rmb();
        return kvm->vcpus[i];
 }
@@ -600,6 +601,7 @@ void kvm_put_kvm(struct kvm *kvm);
 
 static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
 {
+       as_id = array_index_nospec(as_id, KVM_ADDRESS_SPACE_NUM);
        return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
                        lockdep_is_held(&kvm->slots_lock) ||
                        !refcount_read(&kvm->users_count));
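
Both kvm hunks clamp an attacker-influenceable index before the array access, so a mispredicted bounds check cannot be used to speculatively read out of range. The generic helper behind array_index_nospec() reduces to a branchless mask; a userspace restatement (nospec_index() is a simplified re-derivation and, like the kernel's fallback, assumes arithmetic right shift of negative signed values):

    #include <assert.h>
    #include <limits.h>

    /* Returns idx when idx < size, else 0, with no conditional branch. */
    static unsigned long nospec_index(unsigned long idx, unsigned long size)
    {
        unsigned long mask =
            ~(unsigned long)((long)(idx | (size - 1 - idx)) >>
                             (sizeof(long) * CHAR_BIT - 1));
        return idx & mask;
    }

    int main(void)
    {
        assert(nospec_index(3, 8) == 3);    /* in range: passed through */
        assert(nospec_index(9, 8) == 0);    /* out of range: clamped */
        return 0;
    }
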
index 79626b5ab36cce2492a406c3ff4f50023c4c2150..58aa3adf94e63585631876b9a880c454fbd905df 100644 (file)
@@ -207,7 +207,7 @@ static inline void list_bulk_move_tail(struct list_head *head,
 }
 
 /**
- * list_is_first -- tests whether @ list is the first entry in list @head
+ * list_is_first -- tests whether @list is the first entry in list @head
  * @list: the entry to test
  * @head: the head of the list
  */
index 1f3d880b7ca1736057546bc0b98997b6b8a65aea..dbb6118370c1e3c6df88b7da5695aea57dcd08ad 100644 (file)
@@ -566,7 +566,10 @@ struct mem_cgroup *lock_page_memcg(struct page *page);
 void __unlock_page_memcg(struct mem_cgroup *memcg);
 void unlock_page_memcg(struct page *page);
 
-/* idx can be of type enum memcg_stat_item or node_stat_item */
+/*
+ * idx can be of type enum memcg_stat_item or node_stat_item.
+ * Keep in sync with memcg_exact_page_state().
+ */
 static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
                                             int idx)
 {
index 6fee8b1a4400842a7db69b0a08d6bc3edf436854..5cd824c1c0caa8c9adda4a8e6d640f43605cd4fb 100644 (file)
@@ -469,7 +469,7 @@ static inline u32 linkmode_adv_to_lcl_adv_t(unsigned long *advertising)
        if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
                              advertising))
                lcl_adv |= ADVERTISE_PAUSE_CAP;
-       if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+       if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
                              advertising))
                lcl_adv |= ADVERTISE_PAUSE_ASYM;
 
index 612c8c2f2466ab98dd634845180028f207f2fd0f..769326ea1d9b5a32d35bfa64126d18fdbef8a6d6 100644 (file)
@@ -170,7 +170,7 @@ static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
        doorbell[0] = cpu_to_be32(sn << 28 | cmd | ci);
        doorbell[1] = cpu_to_be32(cq->cqn);
 
-       mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL, NULL);
+       mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL);
 }
 
 static inline void mlx5_cq_hold(struct mlx5_core_cq *cq)
index f93a5598b942ff4eddb8dcebb218b8a77d0d7332..db7dca75d72624b24bfb471403cda4696abdfd94 100644 (file)
@@ -361,6 +361,7 @@ enum {
 
 enum {
        MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT = 0x1,
+       MLX5_GENERAL_SUBTYPE_PCI_POWER_CHANGE_EVENT = 0x5,
 };
 
 enum {
index 0787de28f2fcd6372226d7e28c8920c1d7a8d6dc..5c267707e1df7c3efea359743028013ca13c1ac5 100644 (file)
 #define MLX5_BF_OFFSET       0x800
 #define MLX5_CQ_DOORBELL      0x20
 
-#if BITS_PER_LONG == 64
 /* Assume that we can just write a 64-bit doorbell atomically.  s390
  * actually doesn't have writeq() but S/390 systems don't even have
  * PCI so we won't worry about it.
+ *
+ * Note that the write is not atomic on 32-bit systems! In contrast to 64-bit
+ * ones, it requires proper locking. mlx5_write64 doesn't do any locking, so use
+ * it at your own discretion, protected by some kind of lock on 32-bit systems.
+ *
+ * TODO: use write{q,l}_relaxed()
  */
 
-#define MLX5_DECLARE_DOORBELL_LOCK(name)
-#define MLX5_INIT_DOORBELL_LOCK(ptr)    do { } while (0)
-#define MLX5_GET_DOORBELL_LOCK(ptr)      (NULL)
-
-static inline void mlx5_write64(__be32 val[2], void __iomem *dest,
-                               spinlock_t *doorbell_lock)
+static inline void mlx5_write64(__be32 val[2], void __iomem *dest)
 {
+#if BITS_PER_LONG == 64
        __raw_writeq(*(u64 *)val, dest);
-}
-
 #else
-
-/* Just fall back to a spinlock to protect the doorbell if
- * BITS_PER_LONG is 32 -- there's no portable way to do atomic 64-bit
- * MMIO writes.
- */
-
-#define MLX5_DECLARE_DOORBELL_LOCK(name) spinlock_t name;
-#define MLX5_INIT_DOORBELL_LOCK(ptr)     spin_lock_init(ptr)
-#define MLX5_GET_DOORBELL_LOCK(ptr)      (ptr)
-
-static inline void mlx5_write64(__be32 val[2], void __iomem *dest,
-                               spinlock_t *doorbell_lock)
-{
-       unsigned long flags;
-
-       if (doorbell_lock)
-               spin_lock_irqsave(doorbell_lock, flags);
        __raw_writel((__force u32) val[0], dest);
        __raw_writel((__force u32) val[1], dest + 4);
-       if (doorbell_lock)
-               spin_unlock_irqrestore(doorbell_lock, flags);
-}
-
 #endif
+}
 
 #endif /* MLX5_DOORBELL_H */
index 022541dc5dbfd7b12a54601c1d1a59e30eed8a37..d2d380d5e41552f73c6c5289cf3169371e1241f6 100644 (file)
@@ -133,6 +133,7 @@ enum {
        MLX5_REG_MTRC_CONF       = 0x9041,
        MLX5_REG_MTRC_STDB       = 0x9042,
        MLX5_REG_MTRC_CTRL       = 0x9043,
+       MLX5_REG_MPEIN           = 0x9050,
        MLX5_REG_MPCNT           = 0x9051,
        MLX5_REG_MTPPS           = 0x9053,
        MLX5_REG_MTPPSE          = 0x9054,
@@ -594,6 +595,8 @@ enum mlx5_pagefault_type_flags {
 };
 
 struct mlx5_td {
+       /* protects tirs_list changes while the tirs are refreshed */
+       struct mutex     list_lock;
        struct list_head tirs_list;
        u32              tdn;
 };
@@ -660,6 +663,7 @@ struct mlx5_core_dev {
        u64                     sys_image_guid;
        phys_addr_t             iseg_base;
        struct mlx5_init_seg __iomem *iseg;
+       phys_addr_t             bar_addr;
        enum mlx5_device_state  state;
        /* sync interface state */
        struct mutex            intf_state_mutex;
@@ -885,6 +889,7 @@ void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
 int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
 int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
 int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
+void mlx5_health_flush(struct mlx5_core_dev *dev);
 void mlx5_health_cleanup(struct mlx5_core_dev *dev);
 int mlx5_health_init(struct mlx5_core_dev *dev);
 void mlx5_start_health_poll(struct mlx5_core_dev *dev);
index b0e17c94566c130978ca03f2a8118752f3c90ff9..0e0e63d4d7aa4fb9e57139e30cb871a37c22ecf1 100644 (file)
@@ -8027,6 +8027,52 @@ struct mlx5_ifc_ppcnt_reg_bits {
        union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set;
 };
 
+struct mlx5_ifc_mpein_reg_bits {
+       u8         reserved_at_0[0x2];
+       u8         depth[0x6];
+       u8         pcie_index[0x8];
+       u8         node[0x8];
+       u8         reserved_at_18[0x8];
+
+       u8         capability_mask[0x20];
+
+       u8         reserved_at_40[0x8];
+       u8         link_width_enabled[0x8];
+       u8         link_speed_enabled[0x10];
+
+       u8         lane0_physical_position[0x8];
+       u8         link_width_active[0x8];
+       u8         link_speed_active[0x10];
+
+       u8         num_of_pfs[0x10];
+       u8         num_of_vfs[0x10];
+
+       u8         bdf0[0x10];
+       u8         reserved_at_b0[0x10];
+
+       u8         max_read_request_size[0x4];
+       u8         max_payload_size[0x4];
+       u8         reserved_at_c8[0x5];
+       u8         pwr_status[0x3];
+       u8         port_type[0x4];
+       u8         reserved_at_d4[0xb];
+       u8         lane_reversal[0x1];
+
+       u8         reserved_at_e0[0x14];
+       u8         pci_power[0xc];
+
+       u8         reserved_at_100[0x20];
+
+       u8         device_status[0x10];
+       u8         port_state[0x8];
+       u8         reserved_at_138[0x8];
+
+       u8         reserved_at_140[0x10];
+       u8         receiver_detect_result[0x10];
+
+       u8         reserved_at_160[0x20];
+};
+
 struct mlx5_ifc_mpcnt_reg_bits {
        u8         reserved_at_0[0x8];
        u8         pcie_index[0x8];
@@ -8346,7 +8392,9 @@ struct mlx5_ifc_pcam_reg_bits {
 };
 
 struct mlx5_ifc_mcam_enhanced_features_bits {
-       u8         reserved_at_0[0x74];
+       u8         reserved_at_0[0x6e];
+       u8         pci_status_and_power[0x1];
+       u8         reserved_at_6f[0x5];
        u8         mark_tx_action_cnp[0x1];
        u8         mark_tx_action_cqe[0x1];
        u8         dynamic_tx_overflow[0x1];
@@ -8954,6 +9002,7 @@ union mlx5_ifc_ports_control_registers_document_bits {
        struct mlx5_ifc_pmtu_reg_bits pmtu_reg;
        struct mlx5_ifc_ppad_reg_bits ppad_reg;
        struct mlx5_ifc_ppcnt_reg_bits ppcnt_reg;
+       struct mlx5_ifc_mpein_reg_bits mpein_reg;
        struct mlx5_ifc_mpcnt_reg_bits mpcnt_reg;
        struct mlx5_ifc_pplm_reg_bits pplm_reg;
        struct mlx5_ifc_pplr_reg_bits pplr_reg;
index 76769749b5a5d546daf7f7513318ad65a494da4c..6b10c21630f54bdd14ddd6efa1510777165ec558 100644 (file)
@@ -966,6 +966,10 @@ static inline bool is_pci_p2pdma_page(const struct page *page)
 }
 #endif /* CONFIG_DEV_PAGEMAP_OPS */
 
+/* 127: arbitrary random number, small enough to assemble well */
+#define page_ref_zero_or_close_to_overflow(page) \
+       ((unsigned int) page_ref_count(page) + 127u <= 127u)
+
 static inline void get_page(struct page *page)
 {
        page = compound_head(page);
@@ -973,8 +977,17 @@ static inline void get_page(struct page *page)
         * Getting a normal page or the head of a compound page
         * requires to already have an elevated page->_refcount.
         */
-       VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page);
+       VM_BUG_ON_PAGE(page_ref_zero_or_close_to_overflow(page), page);
+       page_ref_inc(page);
+}
+
+static inline __must_check bool try_get_page(struct page *page)
+{
+       page = compound_head(page);
+       if (WARN_ON_ONCE(page_ref_count(page) <= 0))
+               return false;
        page_ref_inc(page);
+       return true;
 }
 
 static inline void put_page(struct page *page)
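
The new macro flags a reference count as unusable when adding 127 wraps the sum back to 127 or less, which catches zero, negative-as-unsigned, and within-127-of-overflow counts in a single compare; try_get_page() then gives callers a variant that can refuse instead of BUG. The arithmetic checked in userspace (ref_zero_or_close_to_overflow() below is a local restatement, not the kernel macro):

    #include <assert.h>
    #include <stdbool.h>

    static bool ref_zero_or_close_to_overflow(unsigned int ref)
    {
        return ref + 127u <= 127u;      /* wraps only at 0 or near UINT_MAX */
    }

    int main(void)
    {
        assert(ref_zero_or_close_to_overflow(0u));           /* freed page     */
        assert(!ref_zero_or_close_to_overflow(1u));          /* normal         */
        assert(!ref_zero_or_close_to_overflow(0x7fffffffu)); /* large but fine */
        assert(ref_zero_or_close_to_overflow(0xffffff81u));  /* UINT_MAX - 126 */
        assert(ref_zero_or_close_to_overflow(0xffffffffu));  /* about to wrap  */
        return 0;
    }
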
index 7eade9132f02e4f85f423404341c6241d335f4d4..4ef4bbe78a1da163fee585597b57ff215c819be6 100644 (file)
@@ -671,7 +671,7 @@ enum vm_fault_reason {
 
 /* Encode hstate index for a hwpoisoned large page */
 #define VM_FAULT_SET_HINDEX(x) ((__force vm_fault_t)((x) << 16))
-#define VM_FAULT_GET_HINDEX(x) (((x) >> 16) & 0xf)
+#define VM_FAULT_GET_HINDEX(x) (((__force unsigned int)(x) >> 16) & 0xf)
 
 #define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS |       \
                        VM_FAULT_SIGSEGV | VM_FAULT_HWPOISON |  \
index 78f5ec4ebf64496886e34a078f563291c47fea05..c46d218a0456becc3296f7de00aaca424b465b47 100644 (file)
@@ -1498,6 +1498,7 @@ struct net_device_ops {
  * @IFF_FAILOVER: device is a failover master device
  * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device
  * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
+ * @IFF_LIVE_RENAME_OK: rename is allowed while device is up and running
  */
 enum netdev_priv_flags {
        IFF_802_1Q_VLAN                 = 1<<0,
@@ -1530,6 +1531,7 @@ enum netdev_priv_flags {
        IFF_FAILOVER                    = 1<<27,
        IFF_FAILOVER_SLAVE              = 1<<28,
        IFF_L3MDEV_RX_HANDLER           = 1<<29,
+       IFF_LIVE_RENAME_OK              = 1<<30,
 };
 
 #define IFF_802_1Q_VLAN                        IFF_802_1Q_VLAN
@@ -1561,6 +1563,7 @@ enum netdev_priv_flags {
 #define IFF_FAILOVER                   IFF_FAILOVER
 #define IFF_FAILOVER_SLAVE             IFF_FAILOVER_SLAVE
 #define IFF_L3MDEV_RX_HANDLER          IFF_L3MDEV_RX_HANDLER
+#define IFF_LIVE_RENAME_OK             IFF_LIVE_RENAME_OK
 
 /**
  *     struct net_device - The DEVICE structure.
@@ -2659,14 +2662,6 @@ void netdev_freemem(struct net_device *dev);
 void synchronize_net(void);
 int init_dummy_netdev(struct net_device *dev);
 
-DECLARE_PER_CPU(int, xmit_recursion);
-#define XMIT_RECURSION_LIMIT   10
-
-static inline int dev_recursion_level(void)
-{
-       return this_cpu_read(xmit_recursion);
-}
-
 struct net_device *dev_get_by_index(struct net *net, int ifindex);
 struct net_device *__dev_get_by_index(struct net *net, int ifindex);
 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
@@ -3015,6 +3010,11 @@ struct softnet_data {
 #ifdef CONFIG_XFRM_OFFLOAD
        struct sk_buff_head     xfrm_backlog;
 #endif
+       /* written and read only by owning cpu: */
+       struct {
+               u16 recursion;
+               u8  more;
+       } xmit;
 #ifdef CONFIG_RPS
        /* input_queue_head should be written by cpu owning this struct,
         * and only read by other cpus. Worth using a cache line.
@@ -3050,6 +3050,28 @@ static inline void input_queue_tail_incr_save(struct softnet_data *sd,
 
 DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
 
+static inline int dev_recursion_level(void)
+{
+       return this_cpu_read(softnet_data.xmit.recursion);
+}
+
+#define XMIT_RECURSION_LIMIT   10
+static inline bool dev_xmit_recursion(void)
+{
+       return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
+                       XMIT_RECURSION_LIMIT);
+}
+
+static inline void dev_xmit_recursion_inc(void)
+{
+       __this_cpu_inc(softnet_data.xmit.recursion);
+}
+
+static inline void dev_xmit_recursion_dec(void)
+{
+       __this_cpu_dec(softnet_data.xmit.recursion);
+}
+
 void __netif_schedule(struct Qdisc *q);
 void netif_schedule_queue(struct netdev_queue *txq);
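
Folding the recursion counter into softnet_data keeps it on the per-CPU cacheline the transmit path already touches; the helpers then implement a bounded re-entrancy guard. A thread-local userspace mock of the inc/check/dec pattern (xmit() and LIMIT are stand-ins; only the shape mirrors the kernel):

    #include <stdio.h>

    #define LIMIT 10                     /* mirrors XMIT_RECURSION_LIMIT */
    static _Thread_local int recursion;  /* per-CPU in the kernel */

    static int xmit(int depth)
    {
        if (recursion > LIMIT)
            return -1;                   /* the kernel drops the packet here */
        recursion++;
        int ret = depth ? xmit(depth - 1) : 0;   /* nested transmit */
        recursion--;
        return ret;
    }

    int main(void)
    {
        printf("depth 5:  %d\n", xmit(5));    /* 0: within the limit */
        printf("depth 50: %d\n", xmit(50));   /* -1: guard trips */
        return 0;
    }
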
 
@@ -4405,10 +4427,15 @@ static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
                                              struct sk_buff *skb, struct net_device *dev,
                                              bool more)
 {
-       skb->xmit_more = more ? 1 : 0;
+       __this_cpu_write(softnet_data.xmit.more, more);
        return ops->ndo_start_xmit(skb, dev);
 }
 
+static inline bool netdev_xmit_more(void)
+{
+       return __this_cpu_read(softnet_data.xmit.more);
+}
+
 static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
                                            struct netdev_queue *txq, bool more)
 {
index 72cb19c3db6aa99e1779649c7213b6067fd29cda..a7252f3baeb0b7ca8db088f6776c39bc77940858 100644 (file)
@@ -24,10 +24,17 @@ static inline int NF_DROP_GETERR(int verdict)
 static inline int nf_inet_addr_cmp(const union nf_inet_addr *a1,
                                   const union nf_inet_addr *a2)
 {
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+       const unsigned long *ul1 = (const unsigned long *)a1;
+       const unsigned long *ul2 = (const unsigned long *)a2;
+
+       return ((ul1[0] ^ ul2[0]) | (ul1[1] ^ ul2[1])) == 0UL;
+#else
        return a1->all[0] == a2->all[0] &&
               a1->all[1] == a2->all[1] &&
               a1->all[2] == a2->all[2] &&
               a1->all[3] == a2->all[3];
+#endif
 }
 
 static inline void nf_inet_addr_mask(const union nf_inet_addr *a1,
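
On 64-bit machines with cheap unaligned loads, the fast path compares the 128-bit address as two 64-bit words: XOR yields zero only for equal words, and OR folds both results, turning four compares with short-circuit branches into straight-line code. A userspace restatement that uses memcpy for alignment safety (addr16_eq() is illustrative):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    static bool addr16_eq(const unsigned char a[16], const unsigned char b[16])
    {
        uint64_t a0, a1, b0, b1;

        memcpy(&a0, a, 8);  memcpy(&a1, a + 8, 8);
        memcpy(&b0, b, 8);  memcpy(&b1, b + 8, 8);
        return ((a0 ^ b0) | (a1 ^ b1)) == 0;
    }

    int main(void)
    {
        unsigned char x[16] = { 1, 2, 3 }, y[16] = { 1, 2, 3 };

        assert(addr16_eq(x, y));
        y[15] = 0xff;
        assert(!addr16_eq(x, y));
        return 0;
    }
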
@@ -360,7 +367,7 @@ extern struct nf_nat_hook __rcu *nf_nat_hook;
 static inline void
 nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
 {
-#ifdef CONFIG_NF_NAT_NEEDED
+#if IS_ENABLED(CONFIG_NF_NAT)
        struct nf_nat_hook *nat_hook;
 
        rcu_read_lock();
index c6000046c96690808525e6e5f534f9902f8900ce..788613f3693590827514c39c0bdc8712a6bc260c 100644 (file)
@@ -21,13 +21,18 @@ struct nf_osf_finger {
        struct nf_osf_user_finger       finger;
 };
 
+struct nf_osf_data {
+       const char *genre;
+       const char *version;
+};
+
 bool nf_osf_match(const struct sk_buff *skb, u_int8_t family,
                  int hooknum, struct net_device *in, struct net_device *out,
                  const struct nf_osf_info *info, struct net *net,
                  const struct list_head *nf_osf_fingers);
 
-const char *nf_osf_find(const struct sk_buff *skb,
-                       const struct list_head *nf_osf_fingers,
-                       const int ttl_check);
+bool nf_osf_find(const struct sk_buff *skb,
+                const struct list_head *nf_osf_fingers,
+                const int ttl_check, struct nf_osf_data *data);
 
 #endif /* _NFOSF_H */
index bf384b3eedb8da0628258ccace89d2e19c0f5105..1f852ef7b0983cd67b95bf4e833681803397f0e7 100644 (file)
@@ -317,7 +317,6 @@ struct xt_table_info *xt_replace_table(struct xt_table *table,
                                       int *error);
 
 struct xt_match *xt_find_match(u8 af, const char *name, u8 revision);
-struct xt_target *xt_find_target(u8 af, const char *name, u8 revision);
 struct xt_match *xt_request_find_match(u8 af, const char *name, u8 revision);
 struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision);
 int xt_find_revision(u8 af, const char *name, u8 revision, int target,
index 471e9467105b32a820fb13b9659d44be4f29aae3..12113e50265639976f8564b19cdae43e4a232309 100644 (file)
@@ -87,6 +87,21 @@ static inline int nf_ip6_route(struct net *net, struct dst_entry **dst,
 }
 
 int ip6_route_me_harder(struct net *net, struct sk_buff *skb);
+
+static inline int nf_ip6_route_me_harder(struct net *net, struct sk_buff *skb)
+{
+#if IS_MODULE(CONFIG_IPV6)
+       const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops();
+
+       if (!v6_ops)
+               return -EHOSTUNREACH;
+
+       return v6_ops->route_me_harder(net, skb);
+#else
+       return ip6_route_me_harder(net, skb);
+#endif
+}
+
 __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
                        unsigned int dataoff, u_int8_t protocol);
 
index baa49e6a23cc7a16092962e4d4a6bfbba9132376..c40720cb59acc4190d40aafdfe23b42ef8159cf9 100644 (file)
@@ -967,8 +967,13 @@ struct nvme_get_log_page_command {
        __le16                  numdl;
        __le16                  numdu;
        __u16                   rsvd11;
-       __le32                  lpol;
-       __le32                  lpou;
+       union {
+               struct {
+                       __le32 lpol;
+                       __le32 lpou;
+               };
+               __le64 lpo;
+       };
        __u32                   rsvd14[2];
 };
 
index 4eb26d2780460a6a821f110ce903ad2a1a54ccce..280ae96dc4c300d29418ececeb5e9b91acf27e2e 100644 (file)
@@ -41,16 +41,6 @@ int move_freepages_block(struct zone *zone, struct page *page,
 
 /*
  * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE.
- * If specified range includes migrate types other than MOVABLE or CMA,
- * this will fail with -EBUSY.
- *
- * For isolating all pages in the range finally, the caller have to
- * free all pages in the range. test_page_isolated() can be used for
- * test it.
- *
- * The following flags are allowed (they can be combined in a bit mask)
- * SKIP_HWPOISON - ignore hwpoison pages
- * REPORT_FAILURE - report details about the failure to isolate the range
  */
 int
 start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
index 34084892a466d6350984b69d07bd39cc9ecdb938..0f9552b17ee72390b6d764fac6ea4760fc75121c 100644 (file)
@@ -345,6 +345,7 @@ struct phy_c45_device_ids {
  * is_c45:  Set to true if this phy uses clause 45 addressing.
  * is_internal: Set to true if this phy is internal to a MAC.
  * is_pseudo_fixed_link: Set to true if this phy is an Ethernet switch, etc.
+ * is_gigabit_capable: Set to true if PHY supports 1000Mbps
  * has_fixups: Set to true if this phy has fixups/quirks.
  * suspended: Set to true if this phy has been suspended successfully.
  * sysfs_links: Internal boolean tracking sysfs symbolic links setup/removal.
@@ -382,6 +383,7 @@ struct phy_device {
        unsigned is_c45:1;
        unsigned is_internal:1;
        unsigned is_pseudo_fixed_link:1;
+       unsigned is_gigabit_capable:1;
        unsigned has_fixups:1;
        unsigned suspended:1;
        unsigned sysfs_links:1;
@@ -390,6 +392,7 @@ struct phy_device {
        unsigned autoneg:1;
        /* The most recently read link state */
        unsigned link:1;
+       unsigned autoneg_complete:1;
 
        /* Interrupts are enabled */
        unsigned interrupts:1;
@@ -1075,6 +1078,7 @@ void phy_attached_info(struct phy_device *phydev);
 
 /* Clause 22 PHY */
 int genphy_config_init(struct phy_device *phydev);
+int genphy_read_abilities(struct phy_device *phydev);
 int genphy_setup_forced(struct phy_device *phydev);
 int genphy_restart_aneg(struct phy_device *phydev);
 int genphy_config_eee_advert(struct phy_device *phydev);
index 787d224ff43e1fc72ceed0164bf26a4f2fa08794..abb2dac3da9b9cf69b477c2d3726e019a0352b7a 100644 (file)
@@ -101,18 +101,20 @@ struct pipe_buf_operations {
        /*
         * Get a reference to the pipe buffer.
         */
-       void (*get)(struct pipe_inode_info *, struct pipe_buffer *);
+       bool (*get)(struct pipe_inode_info *, struct pipe_buffer *);
 };
 
 /**
  * pipe_buf_get - get a reference to a pipe_buffer
  * @pipe:      the pipe that the buffer belongs to
  * @buf:       the buffer to get a reference to
+ *
+ * Return: %true if the reference was successfully obtained.
  */
-static inline void pipe_buf_get(struct pipe_inode_info *pipe,
+static inline __must_check bool pipe_buf_get(struct pipe_inode_info *pipe,
                                struct pipe_buffer *buf)
 {
-       buf->ops->get(pipe, buf);
+       return buf->ops->get(pipe, buf);
 }
 
 /**
@@ -171,7 +173,7 @@ struct pipe_inode_info *alloc_pipe_info(void);
 void free_pipe_info(struct pipe_inode_info *);
 
 /* Generic pipe buffer ops functions */
-void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
+bool generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
 int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
 int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
 void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
index a867637e172d75cbe77a6593309c30cb82a1d197..9e46678edb2aff1e5f0a0d2247e969829551adca 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL+ */
+/* SPDX-License-Identifier: GPL-2.0+ */
 
 /*
  * AMD FCH gpio driver platform-data
index 3ab892208343c2d22d71a630f1097d6941534518..7a37ac27d0fb21d9f8afde973c7618e42574534b 100644 (file)
@@ -35,10 +35,13 @@ struct pmc_clk {
  *
  * @base:      PMC clock register base offset
  * @clks:      pointer to set of registered clocks, typically 0..5
+ * @critical:  flag to indicate whether firmware-enabled pmc_plt_clks
+ *             should be marked as critical or not
  */
 struct pmc_clk_data {
        void __iomem *base;
        const struct pmc_clk *clks;
+       bool critical;
 };
 
 #endif /* __PLATFORM_DATA_X86_CLK_PMC_ATOM_H */
index edb9b040c94c31fe1ff91242232c10d0aea514ac..d5084ebd9f03045e7706872d0db88c3b06d88847 100644 (file)
@@ -9,6 +9,13 @@
 #include <linux/bug.h>                 /* For BUG_ON.  */
 #include <linux/pid_namespace.h>       /* For task_active_pid_ns.  */
 #include <uapi/linux/ptrace.h>
+#include <linux/seccomp.h>
+
+/* Adds sp to seccomp_data; seccomp is user API, so we don't want to modify it */
+struct syscall_info {
+       __u64                   sp;
+       struct seccomp_data     data;
+};
 
 extern int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
                            void *buf, int len, unsigned int gup_flags);
@@ -407,9 +414,7 @@ static inline void user_single_step_report(struct pt_regs *regs)
 #define current_user_stack_pointer() user_stack_pointer(current_pt_regs())
 #endif
 
-extern int task_current_syscall(struct task_struct *target, long *callno,
-                               unsigned long args[6], unsigned int maxargs,
-                               unsigned long *sp, unsigned long *pc);
+extern int task_current_syscall(struct task_struct *target, struct syscall_info *info);
 
 extern void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact);
 #endif
index 763d613ce2c2f275037c3b1b5ce4d390394295c6..57467cbf4c5b1e0e8e68e440f1feadc45b1e806e 100644 (file)
@@ -48,7 +48,6 @@ typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg,
  * @head_offset: Offset of rhash_head in struct to be hashed
  * @max_size: Maximum size while expanding
  * @min_size: Minimum size while shrinking
- * @locks_mul: Number of bucket locks to allocate per cpu (default: 32)
  * @automatic_shrinking: Enable automatic shrinking of tables
  * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash)
  * @obj_hashfn: Function to hash object
@@ -62,7 +61,6 @@ struct rhashtable_params {
        unsigned int            max_size;
        u16                     min_size;
        bool                    automatic_shrinking;
-       u8                      locks_mul;
        rht_hashfn_t            hashfn;
        rht_obj_hashfn_t        obj_hashfn;
        rht_obj_cmpfn_t         obj_cmpfn;
index 86dfa417848d36367e1d42f8f92a2e62b46c2f7b..f7714d3b46bd03139e9072653a22b8750d5ad6e5 100644 (file)
 #include <linux/list_nulls.h>
 #include <linux/workqueue.h>
 #include <linux/rculist.h>
+#include <linux/bit_spinlock.h>
 
 #include <linux/rhashtable-types.h>
 /*
+ * Objects in an rhashtable have an embedded struct rhash_head
+ * which is linked into a hash chain from the hash table - or one
+ * of two or more hash tables when the rhashtable is being resized.
  * The end of the chain is marked with a special nulls marker which has
- * the least significant bit set.
+ * the least significant bit set but otherwise stores the address of
+ * the hash bucket.  This allows us to be sure we've found the end
+ * of the right list.
+ * The value stored in the hash bucket has BIT(0) used as a lock bit.
+ * This bit must be atomically set before any changes are made to
+ * the chain.  To avoid dereferencing this pointer without clearing
+ * the bit first, we use an opaque 'struct rhash_lock_head *' for the
+ * pointer stored in the bucket.  This struct needs to be defined so
+ * that rcu_dereference() works on it, but it has no content so a
+ * cast is needed for it to be useful.  This ensures it isn't
+ * used by mistake without clearing the lock bit first.
  */
+struct rhash_lock_head {};
 
 /* Maximum chain length before rehash
  *
@@ -52,8 +67,6 @@
  * @nest: Number of bits of first-level nested table.
  * @rehash: Current bucket being rehashed
  * @hash_rnd: Random seed to fold into hash
- * @locks_mask: Mask to apply before accessing locks[]
- * @locks: Array of spinlocks protecting individual buckets
  * @walkers: List of active walkers
  * @rcu: RCU structure for freeing the table
  * @future_tbl: Table under construction during rehashing
@@ -64,29 +77,33 @@ struct bucket_table {
        unsigned int            size;
        unsigned int            nest;
        u32                     hash_rnd;
-       unsigned int            locks_mask;
-       spinlock_t              *locks;
        struct list_head        walkers;
        struct rcu_head         rcu;
 
        struct bucket_table __rcu *future_tbl;
 
-       struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
+       struct lockdep_map      dep_map;
+
+       struct rhash_lock_head __rcu *buckets[] ____cacheline_aligned_in_smp;
 };
 
 /*
  * NULLS_MARKER() expects a hash value with the low
 * bits most likely to be significant, and it discards
  * the msb.
- * We git it an address, in which the bottom 2 bits are
+ * We give it an address, in which the bottom bit is
  * always 0, and the msb might be significant.
  * So we shift the address down one bit to align with
  * expectations and avoid losing a significant bit.
+ *
+ * We never store the NULLS_MARKER in the hash table
+ * itself as we need the lsb for locking.
+ * Instead we store NULL.
  */
 #define        RHT_NULLS_MARKER(ptr)   \
        ((void *)NULLS_MARKER(((unsigned long) (ptr)) >> 1))
 #define INIT_RHT_NULLS_HEAD(ptr)       \
-       ((ptr) = RHT_NULLS_MARKER(&(ptr)))
+       ((ptr) = NULL)
 
 static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
 {
@@ -206,25 +223,6 @@ static inline bool rht_grow_above_max(const struct rhashtable *ht,
        return atomic_read(&ht->nelems) >= ht->max_elems;
 }
 
-/* The bucket lock is selected based on the hash and protects mutations
- * on a group of hash buckets.
- *
- * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
- * a single lock always covers both buckets which may both contains
- * entries which link to the same bucket of the old table during resizing.
- * This allows to simplify the locking as locking the bucket in both
- * tables during resize always guarantee protection.
- *
- * IMPORTANT: When holding the bucket lock of both the old and new table
- * during expansions and shrinking, the old bucket lock must always be
- * acquired first.
- */
-static inline spinlock_t *rht_bucket_lock(const struct bucket_table *tbl,
-                                         unsigned int hash)
-{
-       return &tbl->locks[hash & tbl->locks_mask];
-}
-
 #ifdef CONFIG_PROVE_LOCKING
 int lockdep_rht_mutex_is_held(struct rhashtable *ht);
 int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
@@ -263,11 +261,13 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
                                 void *arg);
 void rhashtable_destroy(struct rhashtable *ht);
 
-struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
-                                           unsigned int hash);
-struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
-                                                  struct bucket_table *tbl,
+struct rhash_lock_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
+                                                unsigned int hash);
+struct rhash_lock_head __rcu **__rht_bucket_nested(const struct bucket_table *tbl,
                                                   unsigned int hash);
+struct rhash_lock_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
+                                                       struct bucket_table *tbl,
+                                                       unsigned int hash);
 
 #define rht_dereference(p, ht) \
        rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))
@@ -284,27 +284,126 @@ struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
 #define rht_entry(tpos, pos, member) \
        ({ tpos = container_of(pos, typeof(*tpos), member); 1; })
 
-static inline struct rhash_head __rcu *const *rht_bucket(
+static inline struct rhash_lock_head __rcu *const *rht_bucket(
        const struct bucket_table *tbl, unsigned int hash)
 {
        return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
                                     &tbl->buckets[hash];
 }
 
-static inline struct rhash_head __rcu **rht_bucket_var(
+static inline struct rhash_lock_head __rcu **rht_bucket_var(
        struct bucket_table *tbl, unsigned int hash)
 {
-       return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
+       return unlikely(tbl->nest) ? __rht_bucket_nested(tbl, hash) :
                                     &tbl->buckets[hash];
 }
 
-static inline struct rhash_head __rcu **rht_bucket_insert(
+static inline struct rhash_lock_head __rcu **rht_bucket_insert(
        struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
 {
        return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) :
                                     &tbl->buckets[hash];
 }
 
+/*
+ * We lock a bucket by setting BIT(0) in the pointer - this is always
+ * zero in real pointers.  The NULLS mark is never stored in the bucket,
+ * rather we store NULL if the bucket is empty.
+ * bit_spin_locks do not handle contention well, but the whole point
+ * of the hashtable design is to achieve minimum per-bucket contention.
+ * A nested hash table might not have a bucket pointer.  In that case
+ * we cannot get a lock.  For remove and replace a missing bucket
+ * means the entry cannot be present, so no locking is needed.
+ * For insert we allocate the bucket if this is the last bucket_table,
+ * and then take the lock.
+ * Sometimes we unlock a bucket by writing a new pointer there.  In that
+ * case we don't need to unlock, but we do need to reset state such as
+ * local_bh. For that we have rht_assign_unlock().  As rcu_assign_pointer()
+ * provides the same release semantics as bit_spin_unlock(),
+ * this is safe.
+ * When we write to a bucket without unlocking, we use rht_assign_locked().
+ */
+
+static inline void rht_lock(struct bucket_table *tbl,
+                           struct rhash_lock_head **bkt)
+{
+       local_bh_disable();
+       bit_spin_lock(0, (unsigned long *)bkt);
+       lock_map_acquire(&tbl->dep_map);
+}
+
+static inline void rht_lock_nested(struct bucket_table *tbl,
+                                  struct rhash_lock_head **bucket,
+                                  unsigned int subclass)
+{
+       local_bh_disable();
+       bit_spin_lock(0, (unsigned long *)bucket);
+       lock_acquire_exclusive(&tbl->dep_map, subclass, 0, NULL, _THIS_IP_);
+}
+
+static inline void rht_unlock(struct bucket_table *tbl,
+                             struct rhash_lock_head **bkt)
+{
+       lock_map_release(&tbl->dep_map);
+       bit_spin_unlock(0, (unsigned long *)bkt);
+       local_bh_enable();
+}
+
+/*
+ * Where 'bkt' is a bucket and might be locked:
+ *   rht_ptr() dereferences that pointer and clears the lock bit.
+ *   rht_ptr_exclusive() dereferences in a context where exclusive
+ *            access is guaranteed, such as when destroying the table.
+ */
+static inline struct rhash_head *rht_ptr(
+       struct rhash_lock_head __rcu * const *bkt,
+       struct bucket_table *tbl,
+       unsigned int hash)
+{
+       const struct rhash_lock_head *p =
+               rht_dereference_bucket_rcu(*bkt, tbl, hash);
+
+       if ((((unsigned long)p) & ~BIT(0)) == 0)
+               return RHT_NULLS_MARKER(bkt);
+       return (void *)(((unsigned long)p) & ~BIT(0));
+}
+
+static inline struct rhash_head *rht_ptr_exclusive(
+       struct rhash_lock_head __rcu * const *bkt)
+{
+       const struct rhash_lock_head *p =
+               rcu_dereference_protected(*bkt, 1);
+
+       if (!p)
+               return RHT_NULLS_MARKER(bkt);
+       return (void *)(((unsigned long)p) & ~BIT(0));
+}
+
+static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt,
+                                    struct rhash_head *obj)
+{
+       struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt;
+
+       if (rht_is_a_nulls(obj))
+               obj = NULL;
+       rcu_assign_pointer(*p, (void *)((unsigned long)obj | BIT(0)));
+}
+
+static inline void rht_assign_unlock(struct bucket_table *tbl,
+                                    struct rhash_lock_head __rcu **bkt,
+                                    struct rhash_head *obj)
+{
+       struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt;
+
+       if (rht_is_a_nulls(obj))
+               obj = NULL;
+       lock_map_release(&tbl->dep_map);
+       rcu_assign_pointer(*p, obj);
+       preempt_enable();
+       __release(bitlock);
+       local_bh_enable();
+}
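+
+/*
+ * Taken together, a head insertion under the new scheme looks roughly
+ * like the sketch below - a condensed, hypothetical variant of the
+ * insert path further down, with error handling abbreviated:
+ *
+ *	struct rhash_lock_head __rcu **bkt;
+ *
+ *	bkt = rht_bucket_insert(ht, tbl, hash);
+ *	if (!bkt)
+ *		return;				// allocation failed
+ *	rht_lock(tbl, bkt);			// sets BIT(0) in *bkt
+ *	// rht_ptr() strips the lock bit before we chain behind it
+ *	RCU_INIT_POINTER(obj->next, rht_ptr(bkt, tbl, hash));
+ *	// publishing the new head releases the lock in one store
+ *	rht_assign_unlock(tbl, bkt, obj);
+ */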
+
 /**
  * rht_for_each_from - iterate over hash chain from given head
  * @pos:       the &struct rhash_head to use as a loop cursor.
@@ -313,8 +412,8 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
  * @hash:      the hash value / bucket index
  */
 #define rht_for_each_from(pos, head, tbl, hash) \
-       for (pos = rht_dereference_bucket(head, tbl, hash); \
-            !rht_is_a_nulls(pos); \
+       for (pos = head;                        \
+            !rht_is_a_nulls(pos);              \
             pos = rht_dereference_bucket((pos)->next, tbl, hash))
 
 /**
@@ -324,7 +423,8 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
  * @hash:      the hash value / bucket index
  */
 #define rht_for_each(pos, tbl, hash) \
-       rht_for_each_from(pos, *rht_bucket(tbl, hash), tbl, hash)
+       rht_for_each_from(pos, rht_ptr(rht_bucket(tbl, hash), tbl, hash),  \
+                         tbl, hash)
 
 /**
  * rht_for_each_entry_from - iterate over hash chain from given head
@@ -336,7 +436,7 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
  * @member:    name of the &struct rhash_head within the hashable struct.
  */
 #define rht_for_each_entry_from(tpos, pos, head, tbl, hash, member)    \
-       for (pos = rht_dereference_bucket(head, tbl, hash);             \
+       for (pos = head;                                                \
             (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);    \
             pos = rht_dereference_bucket((pos)->next, tbl, hash))
 
@@ -349,8 +449,9 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
  * @member:    name of the &struct rhash_head within the hashable struct.
  */
 #define rht_for_each_entry(tpos, pos, tbl, hash, member)               \
-       rht_for_each_entry_from(tpos, pos, *rht_bucket(tbl, hash),      \
-                                   tbl, hash, member)
+       rht_for_each_entry_from(tpos, pos,                              \
+                               rht_ptr(rht_bucket(tbl, hash), tbl, hash), \
+                               tbl, hash, member)
 
 /**
  * rht_for_each_entry_safe - safely iterate over hash chain of given type
@@ -365,7 +466,7 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
  * remove the loop cursor from the list.
  */
 #define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member)          \
-       for (pos = rht_dereference_bucket(*rht_bucket(tbl, hash), tbl, hash), \
+       for (pos = rht_ptr(rht_bucket(tbl, hash), tbl, hash),                 \
             next = !rht_is_a_nulls(pos) ?                                    \
                       rht_dereference_bucket(pos->next, tbl, hash) : NULL;   \
             (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);          \
@@ -386,7 +487,7 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
  */
 #define rht_for_each_rcu_from(pos, head, tbl, hash)                    \
        for (({barrier(); }),                                           \
-            pos = rht_dereference_bucket_rcu(head, tbl, hash);         \
+            pos = head;                                                \
             !rht_is_a_nulls(pos);                                      \
             pos = rcu_dereference_raw(pos->next))
 
@@ -400,8 +501,11 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
  * the _rcu mutation primitives such as rhashtable_insert() as long as the
  * traversal is guarded by rcu_read_lock().
  */
-#define rht_for_each_rcu(pos, tbl, hash)                               \
-       rht_for_each_rcu_from(pos, *rht_bucket(tbl, hash), tbl, hash)
+#define rht_for_each_rcu(pos, tbl, hash)                       \
+       for (({barrier(); }),                                   \
+            pos = rht_ptr(rht_bucket(tbl, hash), tbl, hash);   \
+            !rht_is_a_nulls(pos);                              \
+            pos = rcu_dereference_raw(pos->next))
 
 /**
  * rht_for_each_entry_rcu_from - iterate over rcu hash chain from given head
@@ -418,7 +522,7 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
  */
 #define rht_for_each_entry_rcu_from(tpos, pos, head, tbl, hash, member) \
        for (({barrier(); }),                                               \
-            pos = rht_dereference_bucket_rcu(head, tbl, hash);             \
+            pos = head;                                                    \
             (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);        \
             pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))
 
@@ -435,8 +539,10 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
  * traversal is guarded by rcu_read_lock().
  */
 #define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member)              \
-       rht_for_each_entry_rcu_from(tpos, pos, *rht_bucket(tbl, hash), \
-                                       tbl, hash, member)
+       rht_for_each_entry_rcu_from(tpos, pos,                             \
+                                   rht_ptr(rht_bucket(tbl, hash),         \
+                                           tbl, hash),                    \
+                                   tbl, hash, member)
 
 /**
  * rhl_for_each_rcu - iterate over rcu hash table list
@@ -481,7 +587,7 @@ static inline struct rhash_head *__rhashtable_lookup(
                .ht = ht,
                .key = key,
        };
-       struct rhash_head __rcu * const *head;
+       struct rhash_lock_head __rcu * const *bkt;
        struct bucket_table *tbl;
        struct rhash_head *he;
        unsigned int hash;
@@ -489,9 +595,9 @@ static inline struct rhash_head *__rhashtable_lookup(
        tbl = rht_dereference_rcu(ht->tbl, ht);
 restart:
        hash = rht_key_hashfn(ht, tbl, key, params);
-       head = rht_bucket(tbl, hash);
+       bkt = rht_bucket(tbl, hash);
        do {
-               rht_for_each_rcu_from(he, *head, tbl, hash) {
+               rht_for_each_rcu_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) {
                        if (params.obj_cmpfn ?
                            params.obj_cmpfn(&arg, rht_obj(ht, he)) :
                            rhashtable_compare(&arg, rht_obj(ht, he)))
@@ -501,7 +607,7 @@ static inline struct rhash_head *__rhashtable_lookup(
                /* An object might have been moved to a different hash chain,
                 * while we walk along it - better check and retry.
                 */
-       } while (he != RHT_NULLS_MARKER(head));
+       } while (he != RHT_NULLS_MARKER(bkt));
 
        /* Ensure we see any new tables. */
        smp_rmb();
@@ -597,10 +703,10 @@ static inline void *__rhashtable_insert_fast(
                .ht = ht,
                .key = key,
        };
+       struct rhash_lock_head __rcu **bkt;
        struct rhash_head __rcu **pprev;
        struct bucket_table *tbl;
        struct rhash_head *head;
-       spinlock_t *lock;
        unsigned int hash;
        int elasticity;
        void *data;
@@ -609,23 +715,22 @@ static inline void *__rhashtable_insert_fast(
 
        tbl = rht_dereference_rcu(ht->tbl, ht);
        hash = rht_head_hashfn(ht, tbl, obj, params);
-       lock = rht_bucket_lock(tbl, hash);
-       spin_lock_bh(lock);
+       elasticity = RHT_ELASTICITY;
+       bkt = rht_bucket_insert(ht, tbl, hash);
+       data = ERR_PTR(-ENOMEM);
+       if (!bkt)
+               goto out;
+       pprev = NULL;
+       rht_lock(tbl, bkt);
 
        if (unlikely(rcu_access_pointer(tbl->future_tbl))) {
 slow_path:
-               spin_unlock_bh(lock);
+               rht_unlock(tbl, bkt);
                rcu_read_unlock();
                return rhashtable_insert_slow(ht, key, obj);
        }
 
-       elasticity = RHT_ELASTICITY;
-       pprev = rht_bucket_insert(ht, tbl, hash);
-       data = ERR_PTR(-ENOMEM);
-       if (!pprev)
-               goto out;
-
-       rht_for_each_from(head, *pprev, tbl, hash) {
+       rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) {
                struct rhlist_head *plist;
                struct rhlist_head *list;
 
@@ -641,7 +746,7 @@ static inline void *__rhashtable_insert_fast(
                data = rht_obj(ht, head);
 
                if (!rhlist)
-                       goto out;
+                       goto out_unlock;
 
 
                list = container_of(obj, struct rhlist_head, rhead);
@@ -650,9 +755,13 @@ static inline void *__rhashtable_insert_fast(
                RCU_INIT_POINTER(list->next, plist);
                head = rht_dereference_bucket(head->next, tbl, hash);
                RCU_INIT_POINTER(list->rhead.next, head);
-               rcu_assign_pointer(*pprev, obj);
-
-               goto good;
+               if (pprev) {
+                       rcu_assign_pointer(*pprev, obj);
+                       rht_unlock(tbl, bkt);
+               } else
+                       rht_assign_unlock(tbl, bkt, obj);
+               data = NULL;
+               goto out;
        }
 
        if (elasticity <= 0)
@@ -660,12 +769,13 @@ static inline void *__rhashtable_insert_fast(
 
        data = ERR_PTR(-E2BIG);
        if (unlikely(rht_grow_above_max(ht, tbl)))
-               goto out;
+               goto out_unlock;
 
        if (unlikely(rht_grow_above_100(ht, tbl)))
                goto slow_path;
 
-       head = rht_dereference_bucket(*pprev, tbl, hash);
+       /* Inserting at head of list makes unlocking free. */
+       head = rht_ptr(bkt, tbl, hash);
 
        RCU_INIT_POINTER(obj->next, head);
        if (rhlist) {
@@ -675,20 +785,21 @@ static inline void *__rhashtable_insert_fast(
                RCU_INIT_POINTER(list->next, NULL);
        }
 
-       rcu_assign_pointer(*pprev, obj);
-
        atomic_inc(&ht->nelems);
+       rht_assign_unlock(tbl, bkt, obj);
+
        if (rht_grow_above_75(ht, tbl))
                schedule_work(&ht->run_work);
 
-good:
        data = NULL;
-
 out:
-       spin_unlock_bh(lock);
        rcu_read_unlock();
 
        return data;
+
+out_unlock:
+       rht_unlock(tbl, bkt);
+       goto out;
 }
 
 /**
@@ -697,9 +808,9 @@ static inline void *__rhashtable_insert_fast(
  * @obj:       pointer to hash head inside object
  * @params:    hash table parameters
  *
- * Will take a per bucket spinlock to protect against mutual mutations
+ * Will take the per bucket bitlock to protect against concurrent mutations
  * on the same bucket. Multiple insertions may occur in parallel unless
- * they map to the same bucket lock.
+ * they map to the same bucket.
  *
  * It is safe to call this function from atomic context.
  *
@@ -726,9 +837,9 @@ static inline int rhashtable_insert_fast(
  * @list:      pointer to hash list head inside object
  * @params:    hash table parameters
  *
- * Will take a per bucket spinlock to protect against mutual mutations
+ * Will take the per bucket bitlock to protect against concurrent mutations
  * on the same bucket. Multiple insertions may occur in parallel unless
- * they map to the same bucket lock.
+ * they map to the same bucket.
  *
  * It is safe to call this function from atomic context.
  *
@@ -749,9 +860,9 @@ static inline int rhltable_insert_key(
  * @list:      pointer to hash list head inside object
  * @params:    hash table parameters
  *
- * Will take a per bucket spinlock to protect against mutual mutations
+ * Will take the per bucket bitlock to protect against concurrent mutations
  * on the same bucket. Multiple insertions may occur in parallel unless
- * they map to the same bucket lock.
+ * they map to the same bucket.
  *
  * It is safe to call this function from atomic context.
  *
@@ -878,19 +989,20 @@ static inline int __rhashtable_remove_fast_one(
        struct rhash_head *obj, const struct rhashtable_params params,
        bool rhlist)
 {
+       struct rhash_lock_head __rcu **bkt;
        struct rhash_head __rcu **pprev;
        struct rhash_head *he;
-       spinlock_t * lock;
        unsigned int hash;
        int err = -ENOENT;
 
        hash = rht_head_hashfn(ht, tbl, obj, params);
-       lock = rht_bucket_lock(tbl, hash);
-
-       spin_lock_bh(lock);
+       bkt = rht_bucket_var(tbl, hash);
+       if (!bkt)
+               return -ENOENT;
+       pprev = NULL;
+       rht_lock(tbl, bkt);
 
-       pprev = rht_bucket_var(tbl, hash);
-       rht_for_each_from(he, *pprev, tbl, hash) {
+       rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) {
                struct rhlist_head *list;
 
                list = container_of(he, struct rhlist_head, rhead);
@@ -930,12 +1042,17 @@ static inline int __rhashtable_remove_fast_one(
                        }
                }
 
-               rcu_assign_pointer(*pprev, obj);
-               break;
+               if (pprev) {
+                       rcu_assign_pointer(*pprev, obj);
+                       rht_unlock(tbl, bkt);
+               } else {
+                       rht_assign_unlock(tbl, bkt, obj);
+               }
+               goto unlocked;
        }
 
-       spin_unlock_bh(lock);
-
+       rht_unlock(tbl, bkt);
+unlocked:
        if (err > 0) {
                atomic_dec(&ht->nelems);
                if (unlikely(ht->p.automatic_shrinking &&
@@ -1024,9 +1141,9 @@ static inline int __rhashtable_replace_fast(
        struct rhash_head *obj_old, struct rhash_head *obj_new,
        const struct rhashtable_params params)
 {
+       struct rhash_lock_head __rcu **bkt;
        struct rhash_head __rcu **pprev;
        struct rhash_head *he;
-       spinlock_t *lock;
        unsigned int hash;
        int err = -ENOENT;
 
@@ -1037,25 +1154,33 @@ static inline int __rhashtable_replace_fast(
        if (hash != rht_head_hashfn(ht, tbl, obj_new, params))
                return -EINVAL;
 
-       lock = rht_bucket_lock(tbl, hash);
+       bkt = rht_bucket_var(tbl, hash);
+       if (!bkt)
+               return -ENOENT;
 
-       spin_lock_bh(lock);
+       pprev = NULL;
+       rht_lock(tbl, bkt);
 
-       pprev = rht_bucket_var(tbl, hash);
-       rht_for_each_from(he, *pprev, tbl, hash) {
+       rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) {
                if (he != obj_old) {
                        pprev = &he->next;
                        continue;
                }
 
                rcu_assign_pointer(obj_new->next, obj_old->next);
-               rcu_assign_pointer(*pprev, obj_new);
+               if (pprev) {
+                       rcu_assign_pointer(*pprev, obj_new);
+                       rht_unlock(tbl, bkt);
+               } else {
+                       rht_assign_unlock(tbl, bkt, obj_new);
+               }
                err = 0;
-               break;
+               goto unlocked;
        }
 
-       spin_unlock_bh(lock);
+       rht_unlock(tbl, bkt);
 
+unlocked:
        return err;
 }
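
None of this changes the contract for rhashtable users: lookups still run
under RCU and inserts may still be called from atomic context. A minimal
sketch, reusing the hypothetical example_obj and example_params from above:

static struct example_obj *example_find(struct rhashtable *ht, u32 key)
{
	struct example_obj *obj;

	rcu_read_lock();
	obj = rhashtable_lookup_fast(ht, &key, example_params);
	if (obj)
		pr_debug("found key %u\n", obj->key);
	rcu_read_unlock();
	return obj;	/* valid only within an RCU read-side section */
}
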
 
index ae56551976983dc508d3f3d63903d3a284a5f3a4..e412c092c1e821edd18f0c9dc0ed734ced5d6234 100644 (file)
@@ -418,10 +418,20 @@ static inline void set_restore_sigmask(void)
        set_thread_flag(TIF_RESTORE_SIGMASK);
        WARN_ON(!test_thread_flag(TIF_SIGPENDING));
 }
+
+static inline void clear_tsk_restore_sigmask(struct task_struct *tsk)
+{
+       clear_tsk_thread_flag(tsk, TIF_RESTORE_SIGMASK);
+}
+
 static inline void clear_restore_sigmask(void)
 {
        clear_thread_flag(TIF_RESTORE_SIGMASK);
 }
+static inline bool test_tsk_restore_sigmask(struct task_struct *tsk)
+{
+       return test_tsk_thread_flag(tsk, TIF_RESTORE_SIGMASK);
+}
 static inline bool test_restore_sigmask(void)
 {
        return test_thread_flag(TIF_RESTORE_SIGMASK);
@@ -439,6 +449,10 @@ static inline void set_restore_sigmask(void)
        current->restore_sigmask = true;
        WARN_ON(!test_thread_flag(TIF_SIGPENDING));
 }
+static inline void clear_tsk_restore_sigmask(struct task_struct *tsk)
+{
+       tsk->restore_sigmask = false;
+}
 static inline void clear_restore_sigmask(void)
 {
        current->restore_sigmask = false;
@@ -447,6 +461,10 @@ static inline bool test_restore_sigmask(void)
 {
        return current->restore_sigmask;
 }
+static inline bool test_tsk_restore_sigmask(struct task_struct *tsk)
+{
+       return tsk->restore_sigmask;
+}
 static inline bool test_and_clear_restore_sigmask(void)
 {
        if (!current->restore_sigmask)
index 9027a8c4219f9a3b0de149ff2974db50471c328a..a06275a618f0c4573a92eba1db7d348868f6d8a6 100644 (file)
@@ -657,7 +657,6 @@ typedef unsigned char *sk_buff_data_t;
  *     @tc_index: Traffic control index
  *     @hash: the packet hash
  *     @queue_mapping: Queue mapping for multiqueue devices
- *     @xmit_more: More SKBs are pending for this queue
  *     @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves
  *     @active_extensions: active extensions (skb_ext_id types)
  *     @ndisc_nodetype: router type (from link layer)
@@ -764,7 +763,6 @@ struct sk_buff {
                                fclone:2,
                                peeked:1,
                                head_frag:1,
-                               xmit_more:1,
                                pfmemalloc:1;
 #ifdef CONFIG_SKB_EXTENSIONS
        __u8                    active_extensions;
@@ -3372,17 +3370,17 @@ struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
                                          unsigned int flags,
                                          void (*destructor)(struct sock *sk,
                                                           struct sk_buff *skb),
-                                         int *peeked, int *off, int *err,
+                                         int *off, int *err,
                                          struct sk_buff **last);
 struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned flags,
                                        void (*destructor)(struct sock *sk,
                                                           struct sk_buff *skb),
-                                       int *peeked, int *off, int *err,
+                                       int *off, int *err,
                                        struct sk_buff **last);
 struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
                                    void (*destructor)(struct sock *sk,
                                                       struct sk_buff *skb),
-                                   int *peeked, int *off, int *err);
+                                   int *off, int *err);
 struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
                                  int *err);
 __poll_t datagram_poll(struct file *file, struct socket *sock,
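
The redundant 'peeked' out-parameter is gone from the receive helpers; the
same information is available from the skb itself. A hedged sketch of a
hypothetical caller after the signature change:

static struct sk_buff *example_recv(struct sock *sk, unsigned int flags)
{
	struct sk_buff *skb, *last;
	int off = 0, err = 0;

	/* old 'int *peeked' argument dropped; skb->peeked replaces it */
	skb = __skb_try_recv_datagram(sk, flags, NULL, &off, &err, &last);
	if (skb && skb->peeked)
		pr_debug("datagram was previously peeked\n");
	return skb;
}
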
index 11b45f7ae4057c3b70105974b8f527dd6f30d8ba..9449b19c5f107a73bfe7eca9fe875708862dc3b9 100644 (file)
@@ -32,6 +32,8 @@
 #define SLAB_HWCACHE_ALIGN     ((slab_flags_t __force)0x00002000U)
 /* Use GFP_DMA memory */
 #define SLAB_CACHE_DMA         ((slab_flags_t __force)0x00004000U)
+/* Use GFP_DMA32 memory */
+#define SLAB_CACHE_DMA32       ((slab_flags_t __force)0x00008000U)
 /* DEBUG: Store the last owner for bug hunting */
 #define SLAB_STORE_USER                ((slab_flags_t __force)0x00010000U)
 /* Panic if kmem_cache_create() fails */
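
The new flag mirrors SLAB_CACHE_DMA for the 32-bit DMA zone. A minimal
sketch of a hypothetical cache whose objects must be DMA32-addressable
(the struct and cache name are illustrative):

struct example_entry {
	u32 id;
	u8 payload[60];
};

static struct kmem_cache *example_cache;

static int __init example_init(void)
{
	/* objects allocated from this cache come from ZONE_DMA32 */
	example_cache = kmem_cache_create("example_dma32",
					  sizeof(struct example_entry),
					  0, SLAB_CACHE_DMA32, NULL);
	return example_cache ? 0 : -ENOMEM;
}
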
index 7927b875f80cf6ff74425e6d6a990aa27a032e21..6ab0a6fa512e75882f62a517e22f53ad946b2356 100644 (file)
@@ -150,6 +150,9 @@ extern void * memscan(void *,int,__kernel_size_t);
 #ifndef __HAVE_ARCH_MEMCMP
 extern int memcmp(const void *,const void *,__kernel_size_t);
 #endif
+#ifndef __HAVE_ARCH_BCMP
+extern int bcmp(const void *,const void *,__kernel_size_t);
+#endif
 #ifndef __HAVE_ARCH_MEMCHR
 extern void * memchr(const void *,int,__kernel_size_t);
 #endif
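
Unlike memcmp(), bcmp() only promises a zero/non-zero answer, not an
ordering, which is why it gets its own weak default. As an illustration:

/* Illustration only: bcmp() reports (in)equality, memcmp() also orders. */
static bool blobs_equal(const void *a, const void *b, size_t len)
{
	return bcmp(a, b, len) == 0;	/* must not rely on the sign */
}
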
index ec861cd0cfe8ce9fa5425909d243c660231f06c9..52d41d0c1ae1d54b6829a10134318ee2835b9fd5 100644 (file)
@@ -304,12 +304,4 @@ rpc_clnt_swap_deactivate(struct rpc_clnt *clnt)
 }
 #endif /* CONFIG_SUNRPC_SWAP */
 
-static inline bool
-rpc_task_need_resched(const struct rpc_task *task)
-{
-       if (RPC_IS_QUEUED(task) || task->tk_callback)
-               return true;
-       return false;
-}
-
 #endif /* _LINUX_SUNRPC_SCHED_H_ */
index a240ed2a0372c20281e03a45fe49dea6a2fd60a3..ff56c443180cd6d35ec6f354ea2d519ce5443b83 100644 (file)
@@ -24,15 +24,17 @@ __printf(1, 2) void vbg_debug(const char *fmt, ...);
 #define vbg_debug pr_debug
 #endif
 
-int vbg_hgcm_connect(struct vbg_dev *gdev,
+int vbg_hgcm_connect(struct vbg_dev *gdev, u32 requestor,
                     struct vmmdev_hgcm_service_location *loc,
                     u32 *client_id, int *vbox_status);
 
-int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status);
+int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 requestor,
+                       u32 client_id, int *vbox_status);
 
-int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
-                 u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms,
-                 u32 parm_count, int *vbox_status);
+int vbg_hgcm_call(struct vbg_dev *gdev, u32 requestor, u32 client_id,
+                 u32 function, u32 timeout_ms,
+                 struct vmmdev_hgcm_function_parameter *parms, u32 parm_count,
+                 int *vbox_status);
 
 /**
  * Convert a VirtualBox status code to a standard Linux kernel return value.
index fab02133a9197a43cd9df33728e657e1679c5e16..3dc70adfe5f5edbbfa6f6508849d1d3630a46e2b 100644 (file)
@@ -63,7 +63,7 @@ struct virtqueue;
 /*
  * Creates a virtqueue and allocates the descriptor ring.  If
  * may_reduce_num is set, then this may allocate a smaller ring than
- * expected.  The caller should query virtqueue_get_ring_size to learn
+ * expected.  The caller should query virtqueue_get_vring_size to learn
  * the actual size of the ring.
  */
 struct virtqueue *vring_create_virtqueue(unsigned int index,
index 2bfb87eb98ce15cd693819d42205a036ae6dd42f..78c856cba4f538c078fada09ef3238c2bc220069 100644 (file)
@@ -61,10 +61,12 @@ int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t,
                               rxrpc_user_attach_call_t, unsigned long, gfp_t,
                               unsigned int);
 void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64);
-u32 rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *);
+bool rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *,
+                            u32 *);
 void rxrpc_kernel_probe_life(struct socket *, struct rxrpc_call *);
 u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *);
 bool rxrpc_kernel_get_reply_time(struct socket *, struct rxrpc_call *,
                                 ktime_t *);
+bool rxrpc_kernel_call_is_complete(struct rxrpc_call *);
 
 #endif /* _NET_RXRPC_H */
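
A hedged sketch of the new calling convention: the life counter now comes
back through a pointer, and the return value appears to indicate whether
the call is still ongoing. The caller shape below is hypothetical:

static void example_check_call(struct socket *sock, struct rxrpc_call *call,
			       u32 *last_life)
{
	u32 life;

	if (!rxrpc_kernel_check_life(sock, call, &life))
		return;			/* call has completed */
	if (life == *last_life)
		rxrpc_kernel_probe_life(sock, call);	/* no progress seen */
	*last_life = life;
}
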
index bb307a11ee636b7194bbe7d31c83f3d798b3379a..13bfeb712d36943cf9c04111a777d39cf08034a9 100644 (file)
@@ -7183,6 +7183,11 @@ void cfg80211_pmsr_complete(struct wireless_dev *wdev,
 #define wiphy_info(wiphy, format, args...)                     \
        dev_info(&(wiphy)->dev, format, ##args)
 
+#define wiphy_err_ratelimited(wiphy, format, args...)          \
+       dev_err_ratelimited(&(wiphy)->dev, format, ##args)
+#define wiphy_warn_ratelimited(wiphy, format, args...)         \
+       dev_warn_ratelimited(&(wiphy)->dev, format, ##args)
+
 #define wiphy_debug(wiphy, format, args...)                    \
        wiphy_printk(KERN_DEBUG, wiphy, format, ##args)
 
index 31d5cec4d06bd9f0910d43957c5d7d12f6a8c6d8..70c7d1ac83445f029528deeb24b7be267df418d6 100644 (file)
@@ -41,11 +41,13 @@ struct devlink {
 };
 
 struct devlink_port_attrs {
-       bool set;
+       u8 set:1,
+          split:1,
+          switch_port:1;
        enum devlink_port_flavour flavour;
        u32 port_number; /* same value as "split group" */
-       bool split;
        u32 split_subport_number;
+       struct netdev_phys_item_id switch_id;
 };
 
 struct devlink_port {
@@ -582,7 +584,9 @@ void devlink_port_type_clear(struct devlink_port *devlink_port);
 void devlink_port_attrs_set(struct devlink_port *devlink_port,
                            enum devlink_port_flavour flavour,
                            u32 port_number, bool split,
-                           u32 split_subport_number);
+                           u32 split_subport_number,
+                           const unsigned char *switch_id,
+                           unsigned char switch_id_len);
 int devlink_sb_register(struct devlink *devlink, unsigned int sb_index,
                        u32 size, u16 ingress_pools_count,
                        u16 egress_pools_count, u16 ingress_tc_count,
@@ -739,6 +743,8 @@ void devlink_compat_running_version(struct net_device *dev,
 int devlink_compat_flash_update(struct net_device *dev, const char *file_name);
 int devlink_compat_phys_port_name_get(struct net_device *dev,
                                      char *name, size_t len);
+int devlink_compat_switch_id_get(struct net_device *dev,
+                                struct netdev_phys_item_id *ppid);
 
 #else
 
@@ -760,6 +766,13 @@ devlink_compat_phys_port_name_get(struct net_device *dev,
        return -EOPNOTSUPP;
 }
 
+static inline int
+devlink_compat_switch_id_get(struct net_device *dev,
+                            struct netdev_phys_item_id *ppid)
+{
+       return -EOPNOTSUPP;
+}
+
 #endif
 
 #endif /* _NET_DEVLINK_H_ */
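
A hedged sketch of a driver call after the signature change: an unsplit
physical port that now also carries a switch ID blob. The function name,
port index and ID bytes are illustrative:

static void example_port_init(struct devlink_port *dlp, u32 port_index)
{
	static const unsigned char example_switch_id[] = {
		0xde, 0xad, 0xbe, 0xef
	};

	devlink_port_attrs_set(dlp, DEVLINK_PORT_FLAVOUR_PHYSICAL,
			       port_index, false, 0,
			       example_switch_id,
			       sizeof(example_switch_id));
}
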
index ae480bba11f58e28435bd45c18632d2d5614a661..0cfc2f828b87d911b8263a3f92f1e38754fc2d45 100644 (file)
@@ -140,6 +140,7 @@ struct dsa_port {
        unsigned int            index;
        const char              *name;
        const struct dsa_port   *cpu_dp;
+       const char              *mac;
        struct device_node      *dn;
        unsigned int            ageing_time;
        u8                      stp_state;
index aa09ae5f01a58c5dad2aae2d697a15f3e606f7c8..2d3cce7c3e8a925f8f89719d2a8137ce4819edcf 100644 (file)
@@ -681,7 +681,7 @@ int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp,
                             unsigned char __user *data, int optlen);
 void ip_options_undo(struct ip_options *opt);
 void ip_forward_options(struct sk_buff *skb);
-int ip_options_rcv_srr(struct sk_buff *skb);
+int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev);
 
 /*
  *     Functions provided by ip_sockglue.c
index 58dbb4e82908cbabc6f5a501dd1b408517f04254..6b7557b71c8caef7785dd0c8c52dfb92cf4e8c52 100644 (file)
@@ -127,6 +127,10 @@ struct rt6_exception {
 
 struct fib6_nh {
        struct fib_nh_common    nh_common;
+
+#ifdef CONFIG_IPV6_ROUTER_PREF
+       unsigned long           last_probe;
+#endif
 };
 
 struct fib6_info {
@@ -155,10 +159,6 @@ struct fib6_info {
        struct rt6_info * __percpu      *rt6i_pcpu;
        struct rt6_exception_bucket __rcu *rt6i_exception_bucket;
 
-#ifdef CONFIG_IPV6_ROUTER_PREF
-       unsigned long                   last_probe;
-#endif
-
        u32                             fib6_metric;
        u8                              fib6_protocol;
        u8                              fib6_type;
@@ -190,6 +190,13 @@ struct rt6_info {
        unsigned short                  rt6i_nfheader_len;
 };
 
+struct fib6_result {
+       struct fib6_nh          *nh;
+       struct fib6_info        *f6i;
+       u32                     fib6_flags;
+       u8                      fib6_type;
+};
+
 #define for_each_fib6_node_rt_rcu(fn)                                  \
        for (rt = rcu_dereference((fn)->leaf); rt;                      \
             rt = rcu_dereference(rt->fib6_next))
@@ -384,18 +391,17 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
 /* called with rcu lock held; can return error pointer
  * caller needs to select path
  */
-struct fib6_info *fib6_lookup(struct net *net, int oif, struct flowi6 *fl6,
-                             int flags);
+int fib6_lookup(struct net *net, int oif, struct flowi6 *fl6,
+               struct fib6_result *res, int flags);
 
 /* called with rcu lock held; caller needs to select path */
-struct fib6_info *fib6_table_lookup(struct net *net, struct fib6_table *table,
-                                   int oif, struct flowi6 *fl6, int strict);
-
-struct fib6_info *fib6_multipath_select(const struct net *net,
-                                       struct fib6_info *match,
-                                       struct flowi6 *fl6, int oif,
-                                       const struct sk_buff *skb, int strict);
+int fib6_table_lookup(struct net *net, struct fib6_table *table,
+                     int oif, struct flowi6 *fl6, struct fib6_result *res,
+                     int strict);
 
+void fib6_select_path(const struct net *net, struct fib6_result *res,
+                     struct flowi6 *fl6, int oif, bool have_oif_match,
+                     const struct sk_buff *skb, int strict);
 struct fib6_node *fib6_node_lookup(struct fib6_node *root,
                                   const struct in6_addr *daddr,
                                   const struct in6_addr *saddr);
index 342180a7285c8bf1b22f4b0d85db936aedb03df2..46bbd8ff9cc6756bcd3b4f7d89f894b684d1f885 100644 (file)
@@ -69,7 +69,7 @@ static inline bool rt6_need_strict(const struct in6_addr *daddr)
 static inline bool rt6_qualify_for_ecmp(const struct fib6_info *f6i)
 {
        return !(f6i->fib6_flags & (RTF_ADDRCONF|RTF_DYNAMIC)) &&
-               f6i->fib6_nh.fib_nh_has_gw;
+               f6i->fib6_nh.fib_nh_gw_family;
 }
 
 void ip6_route_input(struct sk_buff *skb);
@@ -302,8 +302,9 @@ static inline unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
        return mtu;
 }
 
-u32 ip6_mtu_from_fib6(struct fib6_info *f6i, struct in6_addr *daddr,
-                     struct in6_addr *saddr);
+u32 ip6_mtu_from_fib6(const struct fib6_result *res,
+                     const struct in6_addr *daddr,
+                     const struct in6_addr *saddr);
 
 struct neighbour *ip6_neigh_lookup(const struct in6_addr *gw,
                                   struct net_device *dev, struct sk_buff *skb,
index 12a6d759cf57020284cdb314939a03cb02dab3ee..d8195c77e2479daea8aab8161cb0ef57138ec47c 100644 (file)
@@ -32,10 +32,14 @@ struct fib_config {
        u8                      fc_protocol;
        u8                      fc_scope;
        u8                      fc_type;
-       /* 3 bytes unused */
+       u8                      fc_gw_family;
+       /* 2 bytes unused */
        u32                     fc_table;
        __be32                  fc_dst;
-       __be32                  fc_gw;
+       union {
+               __be32          fc_gw4;
+               struct in6_addr fc_gw6;
+       };
        int                     fc_oif;
        u32                     fc_flags;
        u32                     fc_priority;
@@ -83,8 +87,8 @@ struct fib_nh_common {
        struct lwtunnel_state   *nhc_lwtstate;
        unsigned char           nhc_scope;
        u8                      nhc_family;
-       u8                      nhc_has_gw:1,
-                               unused:7;
+       u8                      nhc_gw_family;
+
        union {
                __be32          ipv4;
                struct in6_addr ipv6;
@@ -112,8 +116,7 @@ struct fib_nh {
 #define fib_nh_flags           nh_common.nhc_flags
 #define fib_nh_lws             nh_common.nhc_lwtstate
 #define fib_nh_scope           nh_common.nhc_scope
-#define fib_nh_family          nh_common.nhc_family
-#define fib_nh_has_gw          nh_common.nhc_has_gw
+#define fib_nh_gw_family       nh_common.nhc_gw_family
 #define fib_nh_gw4             nh_common.nhc_gw.ipv4
 #define fib_nh_gw6             nh_common.nhc_gw.ipv6
 #define fib_nh_weight          nh_common.nhc_weight
@@ -144,6 +147,7 @@ struct fib_info {
 #define fib_rtt fib_metrics->metrics[RTAX_RTT-1]
 #define fib_advmss fib_metrics->metrics[RTAX_ADVMSS-1]
        int                     fib_nhs;
+       bool                    fib_nh_is_v6;
        struct rcu_head         rcu;
        struct fib_nh           fib_nh[0];
 #define fib_dev                fib_nh[0].fib_nh_dev
@@ -156,15 +160,16 @@ struct fib_rule;
 
 struct fib_table;
 struct fib_result {
-       __be32          prefix;
-       unsigned char   prefixlen;
-       unsigned char   nh_sel;
-       unsigned char   type;
-       unsigned char   scope;
-       u32             tclassid;
-       struct fib_info *fi;
-       struct fib_table *table;
-       struct hlist_head *fa_head;
+       __be32                  prefix;
+       unsigned char           prefixlen;
+       unsigned char           nh_sel;
+       unsigned char           type;
+       unsigned char           scope;
+       u32                     tclassid;
+       struct fib_nh_common    *nhc;
+       struct fib_info         *fi;
+       struct fib_table        *table;
+       struct hlist_head       *fa_head;
 };
 
 struct fib_result_nl {
@@ -182,11 +187,10 @@ struct fib_result_nl {
        int             err;
 };
 
-#ifdef CONFIG_IP_ROUTE_MULTIPATH
-#define FIB_RES_NH(res)                ((res).fi->fib_nh[(res).nh_sel])
-#else /* CONFIG_IP_ROUTE_MULTIPATH */
-#define FIB_RES_NH(res)                ((res).fi->fib_nh[0])
-#endif /* CONFIG_IP_ROUTE_MULTIPATH */
+static inline struct fib_nh_common *fib_info_nhc(struct fib_info *fi, int nhsel)
+{
+       return &fi->fib_nh[nhsel].nh_common;
+}
 
 #ifdef CONFIG_IP_MULTIPLE_TABLES
 #define FIB_TABLE_HASHSZ 256
@@ -195,18 +199,11 @@ struct fib_result_nl {
 #endif
 
 __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
+__be32 fib_result_prefsrc(struct net *net, struct fib_result *res);
 
-#define FIB_RES_SADDR(net, res)                                \
-       ((FIB_RES_NH(res).nh_saddr_genid ==             \
-         atomic_read(&(net)->ipv4.dev_addr_genid)) ?   \
-        FIB_RES_NH(res).nh_saddr :                     \
-        fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
-#define FIB_RES_GW(res)                        (FIB_RES_NH(res).fib_nh_gw4)
-#define FIB_RES_DEV(res)               (FIB_RES_NH(res).fib_nh_dev)
-#define FIB_RES_OIF(res)               (FIB_RES_NH(res).fib_nh_oif)
-
-#define FIB_RES_PREFSRC(net, res)      ((res).fi->fib_prefsrc ? : \
-                                        FIB_RES_SADDR(net, res))
+#define FIB_RES_NHC(res)               ((res).nhc)
+#define FIB_RES_DEV(res)       (FIB_RES_NHC(res)->nhc_dev)
+#define FIB_RES_OIF(res)       (FIB_RES_NHC(res)->nhc_oif)
 
 struct fib_entry_notifier_info {
        struct fib_notifier_info info; /* must be first */
@@ -404,6 +401,8 @@ static inline bool fib4_rules_early_flow_dissect(struct net *net,
 /* Exported by fib_frontend.c */
 extern const struct nla_policy rtm_ipv4_policy[];
 void ip_fib_init(void);
+int fib_gw_from_via(struct fib_config *cfg, struct nlattr *nla,
+                   struct netlink_ext_ack *extack);
 __be32 fib_compute_spec_dst(struct sk_buff *skb);
 bool fib_info_nh_uses_dev(struct fib_info *fi, const struct net_device *dev);
 int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
@@ -453,10 +452,12 @@ struct fib_table *fib_trie_table(u32 id, struct fib_table *alias);
 static inline void fib_combine_itag(u32 *itag, const struct fib_result *res)
 {
 #ifdef CONFIG_IP_ROUTE_CLASSID
+       struct fib_nh_common *nhc = res->nhc;
+       struct fib_nh *nh = container_of(nhc, struct fib_nh, nh_common);
 #ifdef CONFIG_IP_MULTIPLE_TABLES
        u32 rtag;
 #endif
-       *itag = FIB_RES_NH(*res).nh_tclassid<<16;
+       *itag = nh->nh_tclassid << 16;
 #ifdef CONFIG_IP_MULTIPLE_TABLES
        rtag = res->tclassid;
        if (*itag == 0)
@@ -497,4 +498,9 @@ u32 ip_mtu_from_fib_result(struct fib_result *res, __be32 daddr);
 int ip_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh,
                          struct fib_dump_filter *filter,
                          struct netlink_callback *cb);
+
+int fib_nexthop_info(struct sk_buff *skb, const struct fib_nh_common *nh,
+                    unsigned int *flags, bool skip_oif);
+int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nh,
+                   int nh_weight);
 #endif  /* _NET_FIB_H */
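
With the selected nexthop recorded in the result itself, consumers fetch
device and gateway information through the common struct rather than the
old FIB_RES_NH() macro. A minimal sketch (the reporting function is
hypothetical; fib_lookup() and the nhc fields match the declarations above):

static void example_report_nexthop(struct net *net, struct flowi4 *fl4)
{
	struct fib_result res;
	struct fib_nh_common *nhc;

	if (fib_lookup(net, fl4, &res, 0))
		return;

	nhc = FIB_RES_NHC(res);
	if (nhc->nhc_gw_family == AF_INET)
		pr_debug("gw %pI4 dev %s\n",
			 &nhc->nhc_gw.ipv4, nhc->nhc_dev->name);
}
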
index 047f9a5ccaad4a4c28e88838fa8a86e976b46a19..2ac40135b5765ffc373b6322dc79a22d96db275b 100644 (file)
@@ -600,6 +600,9 @@ struct ip_vs_dest_user_kern {
 
        /* Address family of addr */
        u16                     af;
+
+       u16                     tun_type;       /* tunnel type */
+       __be16                  tun_port;       /* tunnel port */
 };
 
 
@@ -660,6 +663,8 @@ struct ip_vs_dest {
        atomic_t                conn_flags;     /* flags to copy to conn */
        atomic_t                weight;         /* server weight */
        atomic_t                last_weight;    /* server latest weight */
+       __u16                   tun_type;       /* tunnel type */
+       __be16                  tun_port;       /* tunnel port */
 
        refcount_t              refcnt;         /* reference counter */
        struct ip_vs_stats      stats;          /* statistics */
index d8d9c0b0e8c0fe6097c011a3cbcca8a8484a25ef..6c0c4fde16f8902ecb9baf23cbc8db182dc2ab8c 100644 (file)
@@ -12,6 +12,9 @@
 
 /* structs from net/ip6_fib.h */
 struct fib6_info;
+struct fib6_nh;
+struct fib6_config;
+struct fib6_result;
 
 /* This is ugly, ideally these symbols should be built
  * into the core kernel.
@@ -26,20 +29,22 @@ struct ipv6_stub {
        int (*ipv6_route_input)(struct sk_buff *skb);
 
        struct fib6_table *(*fib6_get_table)(struct net *net, u32 id);
-       struct fib6_info *(*fib6_lookup)(struct net *net, int oif,
-                                        struct flowi6 *fl6, int flags);
-       struct fib6_info *(*fib6_table_lookup)(struct net *net,
-                                             struct fib6_table *table,
-                                             int oif, struct flowi6 *fl6,
-                                             int flags);
-       struct fib6_info *(*fib6_multipath_select)(const struct net *net,
-                                                  struct fib6_info *f6i,
-                                                  struct flowi6 *fl6, int oif,
-                                                  const struct sk_buff *skb,
-                                                  int strict);
-       u32 (*ip6_mtu_from_fib6)(struct fib6_info *f6i, struct in6_addr *daddr,
-                                struct in6_addr *saddr);
+       int (*fib6_lookup)(struct net *net, int oif, struct flowi6 *fl6,
+                          struct fib6_result *res, int flags);
+       int (*fib6_table_lookup)(struct net *net, struct fib6_table *table,
+                                int oif, struct flowi6 *fl6,
+                                struct fib6_result *res, int flags);
+       void (*fib6_select_path)(const struct net *net, struct fib6_result *res,
+                                struct flowi6 *fl6, int oif, bool oif_match,
+                                const struct sk_buff *skb, int strict);
+       u32 (*ip6_mtu_from_fib6)(const struct fib6_result *res,
+                                const struct in6_addr *daddr,
+                                const struct in6_addr *saddr);
 
+       int (*fib6_nh_init)(struct net *net, struct fib6_nh *fib6_nh,
+                           struct fib6_config *cfg, gfp_t gfp_flags,
+                           struct netlink_ext_ack *extack);
+       void (*fib6_nh_release)(struct fib6_nh *fib6_nh);
        void (*udpv6_encap_enable)(void);
        void (*ndisc_send_na)(struct net_device *dev, const struct in6_addr *daddr,
                              const struct in6_addr *solicited_addr,
index ac2ed8ec662bd97ebe0337085e78e5a61906d499..112dc18c658f15f79525cae64afa0ea38e2a1159 100644 (file)
@@ -6231,8 +6231,6 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
  * @hw: pointer as obtained from ieee80211_alloc_hw()
  * @ac: AC number to return packets from.
  *
- * Should only be called between calls to ieee80211_txq_schedule_start()
- * and ieee80211_txq_schedule_end().
  * Returns the next txq if successful, %NULL if no queue is eligible. If a txq
  * is returned, it should be returned with ieee80211_return_txq() after the
  * driver has finished scheduling it.
@@ -6240,51 +6238,58 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
 struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac);
 
 /**
- * ieee80211_return_txq - return a TXQ previously acquired by ieee80211_next_txq()
- *
- * @hw: pointer as obtained from ieee80211_alloc_hw()
- * @txq: pointer obtained from station or virtual interface
- *
- * Should only be called between calls to ieee80211_txq_schedule_start()
- * and ieee80211_txq_schedule_end().
- */
-void ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
-
-/**
- * ieee80211_txq_schedule_start - acquire locks for safe scheduling of an AC
+ * ieee80211_txq_schedule_start - start new scheduling round for TXQs
  *
  * @hw: pointer as obtained from ieee80211_alloc_hw()
  * @ac: AC number to acquire locks for
  *
- * Acquire locks needed to schedule TXQs from the given AC. Should be called
- * before ieee80211_next_txq() or ieee80211_return_txq().
+ * Should be called before ieee80211_next_txq() or ieee80211_return_txq().
+ * The driver must not call multiple TXQ scheduling rounds concurrently.
  */
-void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac)
-       __acquires(txq_lock);
+void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac);
+
+/* (deprecated) */
+static inline void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac)
+{
+}
+
+void __ieee80211_schedule_txq(struct ieee80211_hw *hw,
+                             struct ieee80211_txq *txq, bool force);
 
 /**
- * ieee80211_txq_schedule_end - release locks for safe scheduling of an AC
+ * ieee80211_schedule_txq - schedule a TXQ for transmission
  *
  * @hw: pointer as obtained from ieee80211_alloc_hw()
- * @ac: AC number to acquire locks for
+ * @txq: pointer obtained from station or virtual interface
  *
- * Release locks previously acquired by ieee80211_txq_schedule_end().
+ * Schedules a TXQ for transmission if it is not already scheduled,
+ * even if mac80211 does not have any packets buffered.
+ *
+ * The driver may call this function if it has buffered packets for
+ * this TXQ internally.
  */
-void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac)
-       __releases(txq_lock);
+static inline void
+ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
+{
+       __ieee80211_schedule_txq(hw, txq, true);
+}
 
 /**
- * ieee80211_schedule_txq - schedule a TXQ for transmission
+ * ieee80211_return_txq - return a TXQ previously acquired by ieee80211_next_txq()
  *
  * @hw: pointer as obtained from ieee80211_alloc_hw()
  * @txq: pointer obtained from station or virtual interface
+ * @force: schedule txq even if mac80211 does not have any buffered packets.
  *
- * Schedules a TXQ for transmission if it is not already scheduled. Takes a
- * lock, which means it must *not* be called between
- * ieee80211_txq_schedule_start() and ieee80211_txq_schedule_end()
+ * The driver may set force=true if it has buffered packets for this TXQ
+ * internally.
  */
-void ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
-       __acquires(txq_lock) __releases(txq_lock);
+static inline void
+ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq,
+                    bool force)
+{
+       __ieee80211_schedule_txq(hw, txq, force);
+}
 
 /**
  * ieee80211_txq_may_transmit - check whether TXQ is allowed to transmit
index ddfbb591e2c568cbaf186417b8cb9ab33a4075e2..3661500530432745eb6b13cb20e8d5194a73816e 100644 (file)
@@ -2,6 +2,8 @@
 #ifndef _NDISC_H
 #define _NDISC_H
 
+#include <net/ipv6_stubs.h>
+
 /*
  *     ICMP codes for neighbour discovery messages
  */
@@ -379,6 +381,14 @@ static inline struct neighbour *__ipv6_neigh_lookup_noref(struct net_device *dev
        return ___neigh_lookup_noref(&nd_tbl, neigh_key_eq128, ndisc_hashfn, pkey, dev);
 }
 
+static inline
+struct neighbour *__ipv6_neigh_lookup_noref_stub(struct net_device *dev,
+                                                const void *pkey)
+{
+       return ___neigh_lookup_noref(ipv6_stub->nd_tbl, neigh_key_eq128,
+                                    ndisc_hashfn, pkey, dev);
+}
+
 static inline struct neighbour *__ipv6_neigh_lookup(struct net_device *dev, const void *pkey)
 {
        struct neighbour *n;
@@ -409,6 +419,36 @@ static inline void __ipv6_confirm_neigh(struct net_device *dev,
        rcu_read_unlock_bh();
 }
 
+static inline void __ipv6_confirm_neigh_stub(struct net_device *dev,
+                                            const void *pkey)
+{
+       struct neighbour *n;
+
+       rcu_read_lock_bh();
+       n = __ipv6_neigh_lookup_noref_stub(dev, pkey);
+       if (n) {
+               unsigned long now = jiffies;
+
+               /* avoid dirtying neighbour */
+               if (n->confirmed != now)
+                       n->confirmed = now;
+       }
+       rcu_read_unlock_bh();
+}
+
+/* uses ipv6_stub and is meant for use outside of IPv6 core */
+static inline struct neighbour *ip_neigh_gw6(struct net_device *dev,
+                                            const void *addr)
+{
+       struct neighbour *neigh;
+
+       neigh = __ipv6_neigh_lookup_noref_stub(dev, addr);
+       if (unlikely(!neigh))
+               neigh = __neigh_create(ipv6_stub->nd_tbl, addr, dev, false);
+
+       return neigh;
+}
+
 int ndisc_init(void);
 int ndisc_late_init(void);
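
A hedged sketch of the stub-based helper in use outside the IPv6 core,
for instance a module resolving an IPv6 gateway; the function name is
illustrative and error handling is abbreviated:

static int example_resolve_gw6(struct net_device *dev,
			       const struct in6_addr *gw)
{
	struct neighbour *neigh;

	rcu_read_lock_bh();
	neigh = ip_neigh_gw6(dev, gw);
	if (IS_ERR(neigh)) {
		rcu_read_unlock_bh();
		return PTR_ERR(neigh);
	}
	/* sock_confirm_neigh()/neigh_output() would follow here */
	rcu_read_unlock_bh();
	return 0;
}
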
 
index 7c1ab9edba03873ffc9ea029e7925e12051f6b02..50a67bd6a43413bf69e2ad7b7c27e8460b6fb152 100644 (file)
@@ -205,6 +205,8 @@ struct neigh_table {
        int                     (*pconstructor)(struct pneigh_entry *);
        void                    (*pdestructor)(struct pneigh_entry *);
        void                    (*proxy_redo)(struct sk_buff *skb);
+       bool                    (*allow_add)(const struct net_device *dev,
+                                            struct netlink_ext_ack *extack);
        char                    *id;
        struct neigh_parms      parms;
        struct list_head        parms_list;
@@ -498,11 +500,12 @@ static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb
        return dev_queue_xmit(skb);
 }
 
-static inline int neigh_output(struct neighbour *n, struct sk_buff *skb)
+static inline int neigh_output(struct neighbour *n, struct sk_buff *skb,
+                              bool skip_cache)
 {
        const struct hh_cache *hh = &n->hh;
 
-       if ((n->nud_state & NUD_CONNECTED) && hh->hh_len)
+       if ((n->nud_state & NUD_CONNECTED) && hh->hh_len && !skip_cache)
                return neigh_hh_output(hh, skb);
        else
                return n->output(n, skb);
index a68ced28d8f47e09c6f6beabc56a22a04ae3f163..12689ddfc24c44fe3297d1eda548811d8061670b 100644 (file)
@@ -59,6 +59,7 @@ struct net {
                                                 */
        spinlock_t              rules_mod_lock;
 
+       u32                     hash_mix;
        atomic64_t              cookie_gen;
 
        struct list_head        list;           /* list of network namespaces */
diff --git a/include/net/netfilter/ipv4/nf_nat_masquerade.h b/include/net/netfilter/ipv4/nf_nat_masquerade.h
deleted file mode 100644 (file)
index 13d5520..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _NF_NAT_MASQUERADE_IPV4_H_
-#define _NF_NAT_MASQUERADE_IPV4_H_
-
-#include <net/netfilter/nf_nat.h>
-
-unsigned int
-nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
-                      const struct nf_nat_range2 *range,
-                      const struct net_device *out);
-
-int nf_nat_masquerade_ipv4_register_notifier(void);
-void nf_nat_masquerade_ipv4_unregister_notifier(void);
-
-#endif /*_NF_NAT_MASQUERADE_IPV4_H_ */
diff --git a/include/net/netfilter/ipv6/nf_nat_masquerade.h b/include/net/netfilter/ipv6/nf_nat_masquerade.h
deleted file mode 100644 (file)
index 2917bf9..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _NF_NAT_MASQUERADE_IPV6_H_
-#define _NF_NAT_MASQUERADE_IPV6_H_
-
-unsigned int
-nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
-                      const struct net_device *out);
-int nf_nat_masquerade_ipv6_register_notifier(void);
-void nf_nat_masquerade_ipv6_unregister_notifier(void);
-
-#endif /* _NF_NAT_MASQUERADE_IPV6_H_ */
index 006e430d1cdfb7f211e1f655337af7f01367f3e8..93ce6b0daaba9accf8dd270653b51f4a3d815c02 100644 (file)
@@ -48,7 +48,7 @@ struct nf_conntrack_expect {
        /* Expectation class */
        unsigned int class;
 
-#ifdef CONFIG_NF_NAT_NEEDED
+#if IS_ENABLED(CONFIG_NF_NAT)
        union nf_inet_addr saved_addr;
        /* This is the original per-proto part, used to map the
         * expected connection the way the recipient expects. */
index cf332c4e0b325bc06e7926fb5f886b0a3dd07e42..423cda2c65425e43659f2558f67ef4c0fe94dea2 100644 (file)
@@ -69,9 +69,9 @@ static inline bool nf_nat_oif_changed(unsigned int hooknum,
 #endif
 }
 
-int nf_nat_register_fn(struct net *net, const struct nf_hook_ops *ops,
+int nf_nat_register_fn(struct net *net, u8 pf, const struct nf_hook_ops *ops,
                       const struct nf_hook_ops *nat_ops, unsigned int ops_count);
-void nf_nat_unregister_fn(struct net *net, const struct nf_hook_ops *ops,
+void nf_nat_unregister_fn(struct net *net, u8 pf, const struct nf_hook_ops *ops,
                          unsigned int ops_count);
 
 unsigned int nf_nat_packet(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
@@ -98,6 +98,9 @@ void nf_nat_ipv4_unregister_fn(struct net *net, const struct nf_hook_ops *ops);
 int nf_nat_ipv6_register_fn(struct net *net, const struct nf_hook_ops *ops);
 void nf_nat_ipv6_unregister_fn(struct net *net, const struct nf_hook_ops *ops);
 
+int nf_nat_inet_register_fn(struct net *net, const struct nf_hook_ops *ops);
+void nf_nat_inet_unregister_fn(struct net *net, const struct nf_hook_ops *ops);
+
 unsigned int
 nf_nat_inet_fn(void *priv, struct sk_buff *skb,
               const struct nf_hook_state *state);
diff --git a/include/net/netfilter/nf_nat_masquerade.h b/include/net/netfilter/nf_nat_masquerade.h
new file mode 100644 (file)
index 0000000..54a14d6
--- /dev/null
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NF_NAT_MASQUERADE_H_
+#define _NF_NAT_MASQUERADE_H_
+
+#include <net/netfilter/nf_nat.h>
+
+unsigned int
+nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
+                      const struct nf_nat_range2 *range,
+                      const struct net_device *out);
+
+int nf_nat_masquerade_inet_register_notifiers(void);
+void nf_nat_masquerade_inet_unregister_notifiers(void);
+
+unsigned int
+nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
+                      const struct net_device *out);
+
+#endif /*_NF_NAT_MASQUERADE_H_ */
index a50a69f5334c8647d129ac1edce909edfc442691..7239105d9d2e27bac3d0714710cee815d082d051 100644 (file)
@@ -119,4 +119,7 @@ nfqueue_hash(const struct sk_buff *skb, u16 queue, u16 queues_total, u8 family,
        return queue;
 }
 
+int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
+            const struct nf_hook_entries *entries, unsigned int index,
+            unsigned int verdict);
 #endif /* _NF_QUEUE_H */
index 3e9ab643eedfe3056d74e4b247dfe08a0c153c06..2d5a0a1a87b8e9a3d42ef9d875ed886a051779d5 100644 (file)
@@ -475,8 +475,6 @@ void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
                              enum nft_trans_phase phase);
 int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
                       struct nft_set_binding *binding);
-void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
-                         struct nft_set_binding *binding, bool commit);
 void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set);
 
 /**
@@ -1411,4 +1409,6 @@ struct nft_trans_flowtable {
 int __init nft_chain_filter_init(void);
 void nft_chain_filter_fini(void);
 
+void __init nft_chain_route_init(void);
+void nft_chain_route_fini(void);
 #endif /* _NET_NF_TABLES_H */
index 16a842456189f2fc1a3363685b5dd4310a32b2b8..d9b665151f3d9e916f35620141542a5a145e6123 100644 (file)
@@ -2,16 +2,10 @@
 #ifndef __NET_NS_HASH_H__
 #define __NET_NS_HASH_H__
 
-#include <asm/cache.h>
-
-struct net;
+#include <net/net_namespace.h>
 
 static inline u32 net_hash_mix(const struct net *net)
 {
-#ifdef CONFIG_NET_NS
-       return (u32)(((unsigned long)net) >> ilog2(sizeof(*net)));
-#else
-       return 0;
-#endif
+       return net->hash_mix;
 }
 #endif
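
net_hash_mix() now returns per-namespace entropy stored in struct net rather than bits of the namespace pointer, which both randomizes hash distribution per netns and stops the pointer value from leaking into hash-derived values. A hedged sketch of a typical consumer, assuming <linux/jhash.h>; the function name and table sizing are illustrative:

    static u32 example_flow_hash(const struct net *net,
                                 __be32 saddr, __be32 daddr, u32 bits)
    {
            u32 h = jhash_2words((__force u32)saddr, (__force u32)daddr,
                                 net_hash_mix(net));
            return h & ((1U << bits) - 1);
    }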
index 5a0714ff500fd09bd288360a83dad57952e5efaf..80f15b1c1a489a71479845ae0d077875b1a52f66 100644 (file)
@@ -266,7 +266,7 @@ void nr_stop_idletimer(struct sock *);
 int nr_t1timer_running(struct sock *);
 
 /* sysctl_net_netrom.c */
-void nr_register_sysctl(void);
+int nr_register_sysctl(void);
 void nr_unregister_sysctl(void);
 
 #endif
index 87499b6b35d6dd75ea3058449c5db484e2aca611..df5c69db68afc334d0ac51c031ca6120d8b7e6e9 100644 (file)
@@ -166,7 +166,7 @@ struct nci_conn_info {
  * According to specification 102 622 chapter 4.4 Pipes,
  * the pipe identifier is 7 bits long.
  */
-#define NCI_HCI_MAX_PIPES          127
+#define NCI_HCI_MAX_PIPES          128
 
 struct nci_hci_gate {
        u8 gate;
index 9883dc82f7233dede86ec2e5cfeddf09cf56d62d..96f6c9ae33c2357b1d5c9877c95d8bcd482c18cb 100644 (file)
@@ -29,6 +29,8 @@
 #include <net/flow.h>
 #include <net/inet_sock.h>
 #include <net/ip_fib.h>
+#include <net/arp.h>
+#include <net/ndisc.h>
 #include <linux/in_route.h>
 #include <linux/rtnetlink.h>
 #include <linux/rcupdate.h>
@@ -55,12 +57,15 @@ struct rtable {
        unsigned int            rt_flags;
        __u16                   rt_type;
        __u8                    rt_is_input;
-       __u8                    rt_uses_gateway;
+       u8                      rt_gw_family;
 
        int                     rt_iif;
 
        /* Info on neighbour */
-       __be32                  rt_gateway;
+       union {
+               __be32          rt_gw4;
+               struct in6_addr rt_gw6;
+       };
 
        /* Miscellaneous cached information */
        u32                     rt_mtu_locked:1,
@@ -82,8 +87,8 @@ static inline bool rt_is_output_route(const struct rtable *rt)
 
 static inline __be32 rt_nexthop(const struct rtable *rt, __be32 daddr)
 {
-       if (rt->rt_gateway)
-               return rt->rt_gateway;
+       if (rt->rt_gw_family == AF_INET)
+               return rt->rt_gw4;
        return daddr;
 }
 
@@ -347,4 +352,34 @@ static inline int ip4_dst_hoplimit(const struct dst_entry *dst)
        return hoplimit;
 }
 
+static inline struct neighbour *ip_neigh_gw4(struct net_device *dev,
+                                            __be32 daddr)
+{
+       struct neighbour *neigh;
+
+       neigh = __ipv4_neigh_lookup_noref(dev, daddr);
+       if (unlikely(!neigh))
+               neigh = __neigh_create(&arp_tbl, &daddr, dev, false);
+
+       return neigh;
+}
+
+static inline struct neighbour *ip_neigh_for_gw(struct rtable *rt,
+                                               struct sk_buff *skb,
+                                               bool *is_v6gw)
+{
+       struct net_device *dev = rt->dst.dev;
+       struct neighbour *neigh;
+
+       if (likely(rt->rt_gw_family == AF_INET)) {
+               neigh = ip_neigh_gw4(dev, rt->rt_gw4);
+       } else if (rt->rt_gw_family == AF_INET6) {
+               neigh = ip_neigh_gw6(dev, &rt->rt_gw6);
+               *is_v6gw = true;
+       } else {
+               neigh = ip_neigh_gw4(dev, ip_hdr(skb)->daddr);
+       }
+       return neigh;
+}
+
 #endif /* _ROUTE_H */
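
With rt_gw_family and the rt_gw4/rt_gw6 union in place, ip_neigh_for_gw() gives an output path one entry point for both gateway families, and its is_v6gw result feeds straight into the new neigh_output() flag. A hedged sketch of how an IPv4 output routine could drive the pair (the function name is illustrative; the noref neighbour must only be used under the RCU-BH read lock):

    static int example_output2(struct rtable *rt, struct sk_buff *skb)
    {
            bool is_v6gw = false;
            struct neighbour *neigh;
            int res = -EINVAL;

            rcu_read_lock_bh();
            neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
            if (!IS_ERR(neigh)) {
                    /* skip the cached v4 header when the gateway is v6 */
                    res = neigh_output(neigh, skb, is_v6gw);
            }
            rcu_read_unlock_bh();
            return res;
    }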
index 2269383c1399fc13976655c103f0effdb7bf3632..e8f85cd2afcede0dd61395d791b088e44f8bd860 100644 (file)
@@ -52,10 +52,7 @@ struct qdisc_size_table {
 struct qdisc_skb_head {
        struct sk_buff  *head;
        struct sk_buff  *tail;
-       union {
-               u32             qlen;
-               atomic_t        atomic_qlen;
-       };
+       __u32           qlen;
        spinlock_t      lock;
 };
 
@@ -146,9 +143,14 @@ static inline bool qdisc_is_running(struct Qdisc *qdisc)
        return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
 }
 
+static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
+{
+       return q->flags & TCQ_F_CPUSTATS;
+}
+
 static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
 {
-       if (qdisc->flags & TCQ_F_NOLOCK)
+       if (qdisc_is_percpu_stats(qdisc))
                return qdisc->empty;
        return !qdisc->q.qlen;
 }
@@ -481,19 +483,27 @@ static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
        BUILD_BUG_ON(sizeof(qcb->data) < sz);
 }
 
+static inline int qdisc_qlen_cpu(const struct Qdisc *q)
+{
+       return this_cpu_ptr(q->cpu_qstats)->qlen;
+}
+
 static inline int qdisc_qlen(const struct Qdisc *q)
 {
        return q->q.qlen;
 }
 
-static inline u32 qdisc_qlen_sum(const struct Qdisc *q)
+static inline int qdisc_qlen_sum(const struct Qdisc *q)
 {
-       u32 qlen = q->qstats.qlen;
+       __u32 qlen = q->qstats.qlen;
+       int i;
 
-       if (q->flags & TCQ_F_NOLOCK)
-               qlen += atomic_read(&q->q.atomic_qlen);
-       else
+       if (qdisc_is_percpu_stats(q)) {
+               for_each_possible_cpu(i)
+                       qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
+       } else {
                qlen += q->q.qlen;
+       }
 
        return qlen;
 }
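
With TCQ_F_CPUSTATS the queue length lives in each CPU's private counters, so qdisc_qlen_sum() folds them without locking and the result is only approximately consistent while enqueue/dequeue run concurrently. A hedged, self-contained model of the summation (the struct is a stand-in for the qlen field of gnet_stats_queue):

    struct pcpu_qstats { u32 qlen; };

    static int model_qlen_sum(struct pcpu_qstats __percpu *stats)
    {
            int cpu, qlen = 0;

            for_each_possible_cpu(cpu)
                    qlen += per_cpu_ptr(stats, cpu)->qlen;
            return qlen;
    }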
@@ -747,7 +757,7 @@ static inline bool qdisc_all_tx_empty(const struct net_device *dev)
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
                const struct Qdisc *q = rcu_dereference(txq->qdisc);
 
-               if (q->q.qlen) {
+               if (!qdisc_is_empty(q)) {
                        rcu_read_unlock();
                        return false;
                }
@@ -817,11 +827,6 @@ static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
        return sch->enqueue(skb, sch, to_free);
 }
 
-static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
-{
-       return q->flags & TCQ_F_CPUSTATS;
-}
-
 static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
                                  __u64 bytes, __u32 packets)
 {
@@ -889,14 +894,14 @@ static inline void qdisc_qstats_cpu_backlog_inc(struct Qdisc *sch,
        this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
 }
 
-static inline void qdisc_qstats_atomic_qlen_inc(struct Qdisc *sch)
+static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
 {
-       atomic_inc(&sch->q.atomic_qlen);
+       this_cpu_inc(sch->cpu_qstats->qlen);
 }
 
-static inline void qdisc_qstats_atomic_qlen_dec(struct Qdisc *sch)
+static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
 {
-       atomic_dec(&sch->q.atomic_qlen);
+       this_cpu_dec(sch->cpu_qstats->qlen);
 }
 
 static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
@@ -934,6 +939,41 @@ static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
        sch->qstats.overlimits++;
 }
 
+static inline int qdisc_qstats_copy(struct gnet_dump *d, struct Qdisc *sch)
+{
+       __u32 qlen = qdisc_qlen_sum(sch);
+
+       return gnet_stats_copy_queue(d, sch->cpu_qstats, &sch->qstats, qlen);
+}
+
+static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch,  __u32 *qlen,
+                                            __u32 *backlog)
+{
+       struct gnet_stats_queue qstats = { 0 };
+       __u32 len = qdisc_qlen_sum(sch);
+
+       __gnet_stats_copy_queue(&qstats, sch->cpu_qstats, &sch->qstats, len);
+       *qlen = qstats.qlen;
+       *backlog = qstats.backlog;
+}
+
+static inline void qdisc_tree_flush_backlog(struct Qdisc *sch)
+{
+       __u32 qlen, backlog;
+
+       qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
+       qdisc_tree_reduce_backlog(sch, qlen, backlog);
+}
+
+static inline void qdisc_purge_queue(struct Qdisc *sch)
+{
+       __u32 qlen, backlog;
+
+       qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
+       qdisc_reset(sch);
+       qdisc_tree_reduce_backlog(sch, qlen, backlog);
+}
+
 static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
 {
        qh->head = NULL;
@@ -1071,6 +1111,32 @@ static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
        return skb;
 }
 
+static inline void qdisc_update_stats_at_dequeue(struct Qdisc *sch,
+                                                struct sk_buff *skb)
+{
+       if (qdisc_is_percpu_stats(sch)) {
+               qdisc_qstats_cpu_backlog_dec(sch, skb);
+               qdisc_bstats_cpu_update(sch, skb);
+               qdisc_qstats_cpu_qlen_dec(sch);
+       } else {
+               qdisc_qstats_backlog_dec(sch, skb);
+               qdisc_bstats_update(sch, skb);
+               sch->q.qlen--;
+       }
+}
+
+static inline void qdisc_update_stats_at_enqueue(struct Qdisc *sch,
+                                                unsigned int pkt_len)
+{
+       if (qdisc_is_percpu_stats(sch)) {
+               qdisc_qstats_cpu_qlen_inc(sch);
+               this_cpu_add(sch->cpu_qstats->backlog, pkt_len);
+       } else {
+               sch->qstats.backlog += pkt_len;
+               sch->q.qlen++;
+       }
+}
+
 /* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
 static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
 {
@@ -1078,8 +1144,13 @@ static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
 
        if (skb) {
                skb = __skb_dequeue(&sch->gso_skb);
-               qdisc_qstats_backlog_dec(sch, skb);
-               sch->q.qlen--;
+               if (qdisc_is_percpu_stats(sch)) {
+                       qdisc_qstats_cpu_backlog_dec(sch, skb);
+                       qdisc_qstats_cpu_qlen_dec(sch);
+               } else {
+                       qdisc_qstats_backlog_dec(sch, skb);
+                       sch->q.qlen--;
+               }
        } else {
                skb = sch->dequeue(sch);
        }
@@ -1117,13 +1188,8 @@ static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
        sch_tree_lock(sch);
        old = *pold;
        *pold = new;
-       if (old != NULL) {
-               unsigned int qlen = old->q.qlen;
-               unsigned int backlog = old->qstats.backlog;
-
-               qdisc_reset(old);
-               qdisc_tree_reduce_backlog(old, qlen, backlog);
-       }
+       if (old != NULL)
+               qdisc_tree_flush_backlog(old);
        sch_tree_unlock(sch);
 
        return old;
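
qdisc_replace() now delegates to qdisc_tree_flush_backlog(), and qdisc_purge_queue() packages the same flush together with a reset. A hedged sketch of the pattern these helpers factor out of classful qdiscs (child and sch assumed from the surrounding ->change() or ->destroy() path):

    sch_tree_lock(sch);
    qdisc_purge_queue(child); /* propagate qlen/backlog up, then reset */
    sch_tree_unlock(sch);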
index 1d13ec3f2707e5777d564aaf15690f10a15b05b6..eefdfa5abf6e088aaf6a27d4113007c19d6d2f8f 100644 (file)
@@ -421,7 +421,7 @@ static inline void sctp_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
        /*
         * This mimics the behavior of skb_set_owner_r
         */
-       sk->sk_forward_alloc -= event->rmem_len;
+       sk_mem_charge(sk, event->rmem_len);
 }
 
 /* Tests if the list has one and only one entry. */
index bb0ecba3db2b35e7d6539cfc7133e8b7b63c2207..f4ac7117ff2918868ea97e384cbc2087a1a968d8 100644 (file)
@@ -59,7 +59,7 @@ void sctp_ulpq_free(struct sctp_ulpq *);
 int sctp_ulpq_tail_data(struct sctp_ulpq *, struct sctp_chunk *, gfp_t);
 
 /* Add a new event for propagation to the ULP. */
-int sctp_ulpq_tail_event(struct sctp_ulpq *, struct sctp_ulpevent *ev);
+int sctp_ulpq_tail_event(struct sctp_ulpq *, struct sk_buff_head *skb_list);
 
 /* Renege previously received chunks.  */
 void sctp_ulpq_renege(struct sctp_ulpq *, struct sctp_chunk *, gfp_t);
index 7fa2232785226bcafd46b230559964fd16f3c4f4..bdd77bbce7d887dcc2dc90b754ae016628d559c7 100644 (file)
@@ -2091,12 +2091,6 @@ static inline bool skwq_has_sleeper(struct socket_wq *wq)
  * @p:              poll_table
  *
  * See the comments in the wq_has_sleeper function.
- *
- * Do not derive sock from filp->private_data here. An SMC socket establishes
- * an internal TCP socket that is used in the fallback case. All socket
- * operations on the SMC socket are then forwarded to the TCP socket. In case of
- * poll, the filp->private_data pointer references the SMC socket because the
- * TCP socket has no file assigned.
  */
 static inline void sock_poll_wait(struct file *filp, struct socket *sock,
                                  poll_table *p)
index 3ce71d78414c8c4e58d456a81b371949e16dcb90..d9d0ac66f0402deb90d5f622c7c40b4d32dbbec0 100644 (file)
@@ -318,6 +318,7 @@ int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
 int tls_device_sendpage(struct sock *sk, struct page *page,
                        int offset, size_t size, int flags);
 void tls_device_sk_destruct(struct sock *sk);
+void tls_device_free_resources_tx(struct sock *sk);
 void tls_device_init(void);
 void tls_device_cleanup(void);
 int tls_tx_records(struct sock *sk, int flags);
@@ -341,6 +342,7 @@ int tls_push_sg(struct sock *sk, struct tls_context *ctx,
                int flags);
 int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
                            int flags);
+bool tls_free_partial_record(struct sock *sk, struct tls_context *ctx);
 
 static inline struct tls_msg *tls_msg(struct sk_buff *skb)
 {
@@ -390,7 +392,7 @@ tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
 static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
 {
 #ifdef CONFIG_SOCK_VALIDATE_XMIT
-       return sk_fullsock(sk) &
+       return sk_fullsock(sk) &&
               (smp_load_acquire(&sk->sk_validate_xmit_skb) ==
               &tls_validate_xmit_skb);
 #else
index fd6d948755c81a5a48838bc4439a778e87542304..d8ce937bc3952b646e2db0bbdcfdf699cc5f7aa1 100644 (file)
@@ -269,13 +269,13 @@ void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len);
 int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb);
 void udp_skb_destructor(struct sock *sk, struct sk_buff *skb);
 struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
-                              int noblock, int *peeked, int *off, int *err);
+                              int noblock, int *off, int *err);
 static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
                                           int noblock, int *err)
 {
-       int peeked, off = 0;
+       int off = 0;
 
-       return __skb_recv_udp(sk, flags, noblock, &peeked, &off, err);
+       return __skb_recv_udp(sk, flags, noblock, &off, err);
 }
 
 int udp_v4_early_demux(struct sk_buff *skb);
index eb7db605955b852a469d11b62a93817f7726cf88..482b4ea87c3c4bd9fac786fa7165b30123174ea2 100644 (file)
@@ -802,8 +802,13 @@ struct snd_soc_component_driver {
        int probe_order;
        int remove_order;
 
-       /* signal if the module handling the component cannot be removed */
-       unsigned int ignore_module_refcount:1;
+       /*
+        * signal if the module handling the component should not be removed
+        * if a pcm is open. Setting this would prevent the module
+        * refcount being incremented in probe() but allow it to be incremented
+        * when a pcm is opened and decremented when it is closed.
+        */
+       unsigned int module_get_upon_open:1;
 
        /* bits */
        unsigned int idle_bias_on:1;
@@ -1083,6 +1088,8 @@ struct snd_soc_card {
        struct mutex mutex;
        struct mutex dapm_mutex;
 
+       spinlock_t dpcm_lock;
+
        bool instantiated;
        bool topology_shortname_created;
 
index 61ea7a24c8e5479215811b41e462701a89ce58e7..6f2a4dc35e37eae4d302055d195ac2fc2b67089b 100644 (file)
@@ -13,9 +13,9 @@
 TRACE_EVENT(fib_table_lookup,
 
        TP_PROTO(u32 tb_id, const struct flowi4 *flp,
-                const struct fib_nh *nh, int err),
+                const struct fib_nh_common *nhc, int err),
 
-       TP_ARGS(tb_id, flp, nh, err),
+       TP_ARGS(tb_id, flp, nhc, err),
 
        TP_STRUCT__entry(
                __field(        u32,    tb_id           )
@@ -28,14 +28,17 @@ TRACE_EVENT(fib_table_lookup,
                __field(        __u8,   flags           )
                __array(        __u8,   src,    4       )
                __array(        __u8,   dst,    4       )
-               __array(        __u8,   gw,     4       )
-               __array(        __u8,   saddr,  4       )
+               __array(        __u8,   gw4,    4       )
+               __array(        __u8,   gw6,    16      )
                __field(        u16,    sport           )
                __field(        u16,    dport           )
                __dynamic_array(char,  name,   IFNAMSIZ )
        ),
 
        TP_fast_assign(
+               struct in6_addr in6_zero = {};
+               struct net_device *dev;
+               struct in6_addr *in6;
                __be32 *p32;
 
                __entry->tb_id = tb_id;
@@ -62,33 +65,37 @@ TRACE_EVENT(fib_table_lookup,
                        __entry->dport = 0;
                }
 
-               if (nh) {
-                       struct net_device *dev;
+               dev = nhc ? nhc->nhc_dev : NULL;
+               __assign_str(name, dev ? dev->name : "-");
 
-                       p32 = (__be32 *) __entry->saddr;
-                       *p32 = nh->nh_saddr;
+               if (nhc) {
+                       if (nhc->nhc_gw_family == AF_INET) {
+                               p32 = (__be32 *) __entry->gw4;
+                               *p32 = nhc->nhc_gw.ipv4;
 
-                       p32 = (__be32 *) __entry->gw;
-                       *p32 = nh->fib_nh_gw4;
+                               in6 = (struct in6_addr *)__entry->gw6;
+                               *in6 = in6_zero;
+                       } else if (nhc->nhc_gw_family == AF_INET6) {
+                               p32 = (__be32 *) __entry->gw4;
+                               *p32 = 0;
 
-                       dev = nh->fib_nh_dev;
-                       __assign_str(name, dev ? dev->name : "-");
+                               in6 = (struct in6_addr *)__entry->gw6;
+                               *in6 = nhc->nhc_gw.ipv6;
+                       }
                } else {
-                       p32 = (__be32 *) __entry->saddr;
+                       p32 = (__be32 *) __entry->gw4;
                        *p32 = 0;
 
-                       p32 = (__be32 *) __entry->gw;
-                       *p32 = 0;
-
-                       __assign_str(name, "-");
+                       in6 = (struct in6_addr *)__entry->gw6;
+                       *in6 = in6_zero;
                }
        ),
 
-       TP_printk("table %u oif %d iif %d proto %u %pI4/%u -> %pI4/%u tos %d scope %d flags %x ==> dev %s gw %pI4 src %pI4 err %d",
+       TP_printk("table %u oif %d iif %d proto %u %pI4/%u -> %pI4/%u tos %d scope %d flags %x ==> dev %s gw %pI4/%pI6c err %d",
                  __entry->tb_id, __entry->oif, __entry->iif, __entry->proto,
                  __entry->src, __entry->sport, __entry->dst, __entry->dport,
                  __entry->tos, __entry->scope, __entry->flags,
-                 __get_str(name), __entry->gw, __entry->saddr, __entry->err)
+                 __get_str(name), __entry->gw4, __entry->gw6, __entry->err)
 );
 #endif /* _TRACE_FIB_H */
 
index 6d05ebdd669ce79fbbaea9f871b10f5734848673..c6abdcc77c1276df9a947af435295b1d4e9cd766 100644 (file)
 
 TRACE_EVENT(fib6_table_lookup,
 
-       TP_PROTO(const struct net *net, const struct fib6_info *f6i,
+       TP_PROTO(const struct net *net, const struct fib6_result *res,
                 struct fib6_table *table, const struct flowi6 *flp),
 
-       TP_ARGS(net, f6i, table, flp),
+       TP_ARGS(net, res, table, flp),
 
        TP_STRUCT__entry(
                __field(        u32,    tb_id           )
@@ -39,7 +39,7 @@ TRACE_EVENT(fib6_table_lookup,
                struct in6_addr *in6;
 
                __entry->tb_id = table->tb6_id;
-               __entry->err = ip6_rt_type_to_error(f6i->fib6_type);
+               __entry->err = ip6_rt_type_to_error(res->fib6_type);
                __entry->oif = flp->flowi6_oif;
                __entry->iif = flp->flowi6_iif;
                __entry->tos = ip6_tclass(flp->flowlabel);
@@ -62,20 +62,20 @@ TRACE_EVENT(fib6_table_lookup,
                        __entry->dport = 0;
                }
 
-               if (f6i->fib6_nh.fib_nh_dev) {
-                       __assign_str(name, f6i->fib6_nh.fib_nh_dev);
+               if (res->nh && res->nh->fib_nh_dev) {
+                       __assign_str(name, res->nh->fib_nh_dev);
                } else {
                        __assign_str(name, "-");
                }
-               if (f6i == net->ipv6.fib6_null_entry) {
+               if (res->f6i == net->ipv6.fib6_null_entry) {
                        struct in6_addr in6_zero = {};
 
                        in6 = (struct in6_addr *)__entry->gw;
                        *in6 = in6_zero;
 
-               } else if (f6i) {
+               } else if (res->nh) {
                        in6 = (struct in6_addr *)__entry->gw;
-                       *in6 = f6i->fib6_nh.fib_nh_gw6;
+                       *in6 = res->nh->fib_nh_gw6;
                }
        ),
 
index 6a4cfaef33a2122d00dedb10447959d7fcc640be..19a25ed323a506f92c9867bcf259f5c74a2b1e27 100644 (file)
@@ -93,7 +93,7 @@ TRACE_EVENT(mlxsw_sp_acl_tcam_vregion_migrate_end,
                  __entry->mlxsw_sp, __entry->vregion)
 );
 
-TRACE_EVENT(mlxsw_sp_acl_tcam_vregion_rehash_dis,
+TRACE_EVENT(mlxsw_sp_acl_tcam_vregion_rehash_rollback_failed,
        TP_PROTO(const struct mlxsw_sp *mlxsw_sp,
                 const struct mlxsw_sp_acl_tcam_vregion *vregion),
 
index 44a3259ed4a5bde50e231a982624b0893e5bb0eb..b6e0cbc2c71f16df87380860e88544acb6d80a40 100644 (file)
@@ -28,7 +28,7 @@ TRACE_EVENT_FN(sys_enter,
 
        TP_fast_assign(
                __entry->id     = id;
-               syscall_get_arguments(current, regs, 0, 6, __entry->args);
+               syscall_get_arguments(current, regs, __entry->args);
        ),
 
        TP_printk("NR %ld (%lx, %lx, %lx, %lx, %lx, %lx)",
index 5f24b50c9e88eb72dd2a396d8af5dc193e6bd782..059dc2bedaf6e895d30fab48c637e87471049920 100644 (file)
@@ -7,5 +7,7 @@ no-export-headers += kvm.h
 endif
 
 ifeq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/uapi/asm/kvm_para.h),)
+ifeq ($(wildcard $(objtree)/arch/$(SRCARCH)/include/generated/uapi/asm/kvm_para.h),)
 no-export-headers += kvm_para.h
 endif
+endif
index 837024512bafd92c3773282ac5362d826fc93502..2e96d0b4bf65d6fd5f113487e432c66f43e0aa8d 100644 (file)
@@ -105,6 +105,7 @@ enum bpf_cmd {
        BPF_BTF_GET_FD_BY_ID,
        BPF_TASK_FD_QUERY,
        BPF_MAP_LOOKUP_AND_DELETE_ELEM,
+       BPF_MAP_FREEZE,
 };
 
 enum bpf_map_type {
@@ -255,8 +256,19 @@ enum bpf_attach_type {
  */
 #define BPF_F_ANY_ALIGNMENT    (1U << 1)
 
-/* when bpf_ldimm64->src_reg == BPF_PSEUDO_MAP_FD, bpf_ldimm64->imm == fd */
+/* When BPF ldimm64's insn[0].src_reg != 0 then this can have
+ * two extensions:
+ *
+ * insn[0].src_reg:  BPF_PSEUDO_MAP_FD   BPF_PSEUDO_MAP_VALUE
+ * insn[0].imm:      map fd              map fd
+ * insn[1].imm:      0                   offset into value
+ * insn[0].off:      0                   0
+ * insn[1].off:      0                   0
+ * ldimm64 rewrite:  address of map      address of map[0]+offset
+ * verifier type:    CONST_PTR_TO_MAP    PTR_TO_MAP_VALUE
+ */
 #define BPF_PSEUDO_MAP_FD      1
+#define BPF_PSEUDO_MAP_VALUE   2
 
 /* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative
  * offset to another bpf function
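
A hedged sketch of the BPF_PSEUDO_MAP_VALUE encoding described in the table above, written out as the raw two-instruction ldimm64 pair a loader would emit (assumes <linux/bpf.h>; map_fd and value_off are placeholders supplied by the loader):

    struct bpf_insn insns[2] = {
            { .code    = BPF_LD | BPF_DW | BPF_IMM,
              .dst_reg = BPF_REG_1,
              .src_reg = BPF_PSEUDO_MAP_VALUE,
              .imm     = map_fd },    /* insn[0].imm: map fd            */
            { .imm     = value_off }, /* insn[1].imm: offset into value */
    };

The verifier then rewrites the pair to the address of map[0] + value_off and tracks the register as PTR_TO_MAP_VALUE rather than CONST_PTR_TO_MAP.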
@@ -283,7 +295,7 @@ enum bpf_attach_type {
 
 #define BPF_OBJ_NAME_LEN 16U
 
-/* Flags for accessing BPF object */
+/* Flags for accessing BPF object from syscall side. */
 #define BPF_F_RDONLY           (1U << 3)
 #define BPF_F_WRONLY           (1U << 4)
 
@@ -293,6 +305,10 @@ enum bpf_attach_type {
 /* Zero-initialize hash function seed. This should only be used for testing. */
 #define BPF_F_ZERO_SEED                (1U << 6)
 
+/* Flags for accessing BPF object from program side. */
+#define BPF_F_RDONLY_PROG      (1U << 7)
+#define BPF_F_WRONLY_PROG      (1U << 8)
+
 /* flags for BPF_PROG_QUERY */
 #define BPF_F_QUERY_EFFECTIVE  (1U << 0)
 
@@ -396,6 +412,13 @@ union bpf_attr {
                __aligned_u64   data_out;
                __u32           repeat;
                __u32           duration;
+               __u32           ctx_size_in;    /* input: len of ctx_in */
+               __u32           ctx_size_out;   /* input/output: len of ctx_out
+                                                *   returns ENOSPC if ctx_out
+                                                *   is too small.
+                                                */
+               __aligned_u64   ctx_in;
+               __aligned_u64   ctx_out;
        } test;
 
        struct { /* anonymous struct used by BPF_*_GET_*_ID */
@@ -1500,6 +1523,10 @@ union bpf_attr {
  *             * **BPF_F_ADJ_ROOM_ENCAP_L4_UDP **:
  *               Use with ENCAP_L3 flags to further specify the tunnel type.
  *
+ *             * **BPF_F_ADJ_ROOM_ENCAP_L2(len) **:
+ *               Use with ENCAP_L3/L4 flags to further specify the tunnel
+ *               type; **len** is the length of the inner MAC header.
+ *
  *             A call to this helper is susceptible to change the underlaying
  *             packet buffer. Therefore, at load time, all checks on pointers
  *             previously done by the verifier are invalidated and must be
@@ -2641,10 +2668,16 @@ enum bpf_func_id {
 /* BPF_FUNC_skb_adjust_room flags. */
 #define BPF_F_ADJ_ROOM_FIXED_GSO       (1ULL << 0)
 
+#define        BPF_ADJ_ROOM_ENCAP_L2_MASK      0xff
+#define        BPF_ADJ_ROOM_ENCAP_L2_SHIFT     56
+
 #define BPF_F_ADJ_ROOM_ENCAP_L3_IPV4   (1ULL << 1)
 #define BPF_F_ADJ_ROOM_ENCAP_L3_IPV6   (1ULL << 2)
 #define BPF_F_ADJ_ROOM_ENCAP_L4_GRE    (1ULL << 3)
 #define BPF_F_ADJ_ROOM_ENCAP_L4_UDP    (1ULL << 4)
+#define        BPF_F_ADJ_ROOM_ENCAP_L2(len)    (((__u64)len & \
+                                         BPF_ADJ_ROOM_ENCAP_L2_MASK) \
+                                        << BPF_ADJ_ROOM_ENCAP_L2_SHIFT)
 
 /* Mode for BPF_FUNC_skb_adjust_room helper. */
 enum bpf_adj_room_mode {
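
A hedged example of combining the new L2 length with the existing L3/L4 encap flags when growing headroom for an outer Ethernet + IPv4 + UDP encapsulation via bpf_skb_adjust_room() (header structs from <linux/if_ether.h>, <linux/ip.h> and <linux/udp.h>; sizes are illustrative):

    __u64 flags = BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 |
                  BPF_F_ADJ_ROOM_ENCAP_L4_UDP |
                  BPF_F_ADJ_ROOM_ENCAP_L2(sizeof(struct ethhdr));
    int ret = bpf_skb_adjust_room(skb,
                                  sizeof(struct ethhdr) +
                                  sizeof(struct iphdr) +
                                  sizeof(struct udphdr),
                                  BPF_ADJ_ROOM_MAC, flags);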
index 7b7475ef2f175c9279915aa8d9eb014845533bd9..9310652ca4f96bb56c6f7f561332bcff01511f3a 100644 (file)
@@ -39,11 +39,11 @@ struct btf_type {
         *             struct, union and fwd
         */
        __u32 info;
-       /* "size" is used by INT, ENUM, STRUCT and UNION.
+       /* "size" is used by INT, ENUM, STRUCT, UNION and DATASEC.
         * "size" tells the size of the type it is describing.
         *
         * "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT,
-        * FUNC and FUNC_PROTO.
+        * FUNC, FUNC_PROTO and VAR.
         * "type" is a type_id referring to another type.
         */
        union {
@@ -70,8 +70,10 @@ struct btf_type {
 #define BTF_KIND_RESTRICT      11      /* Restrict     */
 #define BTF_KIND_FUNC          12      /* Function     */
 #define BTF_KIND_FUNC_PROTO    13      /* Function Proto       */
-#define BTF_KIND_MAX           13
-#define NR_BTF_KINDS           14
+#define BTF_KIND_VAR           14      /* Variable     */
+#define BTF_KIND_DATASEC       15      /* Section      */
+#define BTF_KIND_MAX           BTF_KIND_DATASEC
+#define NR_BTF_KINDS           (BTF_KIND_MAX + 1)
 
 /* For some specific BTF_KIND, "struct btf_type" is immediately
  * followed by extra data.
@@ -138,4 +140,26 @@ struct btf_param {
        __u32   type;
 };
 
+enum {
+       BTF_VAR_STATIC = 0,
+       BTF_VAR_GLOBAL_ALLOCATED,
+};
+
+/* BTF_KIND_VAR is followed by a single "struct btf_var" to describe
+ * additional information related to the variable such as its linkage.
+ */
+struct btf_var {
+       __u32   linkage;
+};
+
+/* BTF_KIND_DATASEC is followed by multiple "struct btf_var_secinfo"
+ * to describe all BTF_KIND_VAR types it contains along with its
+ * in-section offset as well as size.
+ */
+struct btf_var_secinfo {
+       __u32   type;
+       __u32   offset;
+       __u32   size;
+};
+
 #endif /* _UAPI__LINUX_BTF_H__ */
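
A hedged, schematic view of how the two new kinds nest on the wire (type IDs and names are illustrative): a DATASEC is a btf_type immediately followed by vlen btf_var_secinfo records, each pointing at a VAR, which is itself a btf_type followed by one btf_var:

    /* ".data" section containing one 4-byte global */
    struct btf_type        { name_off: ".data", info: KIND_DATASEC, vlen: 1, size: 4 }
    struct btf_var_secinfo { type: <id of VAR>, offset: 0, size: 4 }

    /* the variable itself */
    struct btf_type        { name_off: "my_var", info: KIND_VAR, type: <id of int> }
    struct btf_var         { linkage: BTF_VAR_GLOBAL_ALLOCATED }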
index 50c76f4fa402a79d67b0834969e9d8a26f33317f..818ad368b5860b343545f200b4893fdf4565042b 100644 (file)
@@ -1599,7 +1599,7 @@ enum ethtool_link_mode_bit_indices {
 
 static inline int ethtool_validate_speed(__u32 speed)
 {
-       return speed <= INT_MAX || speed == SPEED_UNKNOWN;
+       return speed <= INT_MAX || speed == (__u32)SPEED_UNKNOWN;
 }
 
 /* Duplex, half or full. */
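
A hedged, standalone demonstration of what the cast expresses: SPEED_UNKNOWN is -1, so as an unsigned value it is all-ones and must still be accepted even though it exceeds INT_MAX, while other out-of-range values are rejected:

    #include <limits.h>
    #include <stdio.h>

    #define SPEED_UNKNOWN -1

    static int validate_speed(unsigned int speed)
    {
            return speed <= INT_MAX || speed == (unsigned int)SPEED_UNKNOWN;
    }

    int main(void)
    {
            printf("%d\n", validate_speed((unsigned int)SPEED_UNKNOWN)); /* 1 */
            printf("%d\n", validate_speed(0x80000000u));                 /* 0 */
            return 0;
    }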
index 1c916b2f89dc8a31a330cb0979fc1ced992e51c6..e34f436fc79d35f7d5930e11ef5176bb088ecd9b 100644 (file)
 
 #define IP_VS_PEDATA_MAXLEN     255
 
+/* Tunnel types */
+enum {
+       IP_VS_CONN_F_TUNNEL_TYPE_IPIP = 0,      /* IPIP */
+       IP_VS_CONN_F_TUNNEL_TYPE_GUE,           /* GUE */
+       IP_VS_CONN_F_TUNNEL_TYPE_MAX,
+};
+
 /*
  *     The struct ip_vs_service_user and struct ip_vs_dest_user are
  *     used to set IPVS rules through setsockopt.
@@ -392,6 +399,10 @@ enum {
 
        IPVS_DEST_ATTR_STATS64,         /* nested attribute for dest stats */
 
+       IPVS_DEST_ATTR_TUN_TYPE,        /* tunnel type */
+
+       IPVS_DEST_ATTR_TUN_PORT,        /* tunnel port */
+
        __IPVS_DEST_ATTR_MAX,
 };
 
index a66c8de006cc0396afceb1b77db9634e4f2c5e56..061bb3eb20c37d886c1a8741e59321d957bf9a6c 100644 (file)
@@ -1522,15 +1522,21 @@ enum nft_flowtable_hook_attributes {
  *
  * @NFTA_OSF_DREG: destination register (NLA_U32: nft_registers)
  * @NFTA_OSF_TTL: Value of the TTL osf option (NLA_U8)
+ * @NFTA_OSF_FLAGS: flags (NLA_U32)
  */
 enum nft_osf_attributes {
        NFTA_OSF_UNSPEC,
        NFTA_OSF_DREG,
        NFTA_OSF_TTL,
+       NFTA_OSF_FLAGS,
        __NFTA_OSF_MAX,
 };
 #define NFTA_OSF_MAX (__NFTA_OSF_MAX - 1)
 
+enum nft_osf_flags {
+       NFT_OSF_F_VERSION = (1 << 0),
+};
+
 /**
  * enum nft_device_attributes - nf_tables device netlink attributes
  *
index 0e68024f36c712dcb295f6214914ea5dc3371a25..26f39816af14c149ab1d8be5842112f4bf36c18c 100644 (file)
@@ -102,6 +102,66 @@ enum vmmdev_request_type {
 #define VMMDEVREQ_HGCM_CALL VMMDEVREQ_HGCM_CALL32
 #endif
 
+/* vmmdev_request_header.requestor defines */
+
+/* Requestor user not given. */
+#define VMMDEV_REQUESTOR_USR_NOT_GIVEN                      0x00000000
+/* The kernel driver (vboxguest) is the requestor. */
+#define VMMDEV_REQUESTOR_USR_DRV                            0x00000001
+/* Some other kernel driver is the requestor. */
+#define VMMDEV_REQUESTOR_USR_DRV_OTHER                      0x00000002
+/* The root or a admin user is the requestor. */
+#define VMMDEV_REQUESTOR_USR_ROOT                           0x00000003
+/* Regular joe user is making the request. */
+#define VMMDEV_REQUESTOR_USR_USER                           0x00000006
+/* User classification mask. */
+#define VMMDEV_REQUESTOR_USR_MASK                           0x00000007
+
+/* Kernel mode request. Note this is 0, check for !USERMODE instead. */
+#define VMMDEV_REQUESTOR_KERNEL                             0x00000000
+/* User mode request. */
+#define VMMDEV_REQUESTOR_USERMODE                           0x00000008
+/* User or kernel mode classification mask. */
+#define VMMDEV_REQUESTOR_MODE_MASK                          0x00000008
+
+/* Don't know the physical console association of the requestor. */
+#define VMMDEV_REQUESTOR_CON_DONT_KNOW                      0x00000000
+/*
+ * The request originates with a process that is NOT associated with the
+ * physical console.
+ */
+#define VMMDEV_REQUESTOR_CON_NO                             0x00000010
+/* Requestor process is associated with the physical console. */
+#define VMMDEV_REQUESTOR_CON_YES                            0x00000020
+/* Console classification mask. */
+#define VMMDEV_REQUESTOR_CON_MASK                           0x00000030
+
+/* Requestor is member of special VirtualBox user group. */
+#define VMMDEV_REQUESTOR_GRP_VBOX                           0x00000080
+
+/* Note: trust level is for windows guests only, linux always uses not-given */
+/* Requestor trust level: Unspecified */
+#define VMMDEV_REQUESTOR_TRUST_NOT_GIVEN                    0x00000000
+/* Requestor trust level: Untrusted (SID S-1-16-0) */
+#define VMMDEV_REQUESTOR_TRUST_UNTRUSTED                    0x00001000
+/* Requestor trust level: Untrusted (SID S-1-16-4096) */
+#define VMMDEV_REQUESTOR_TRUST_LOW                          0x00002000
+/* Requestor trust level: Medium (SID S-1-16-8192) */
+#define VMMDEV_REQUESTOR_TRUST_MEDIUM                       0x00003000
+/* Requestor trust level: Medium plus (SID S-1-16-8448) */
+#define VMMDEV_REQUESTOR_TRUST_MEDIUM_PLUS                  0x00004000
+/* Requestor trust level: High (SID S-1-16-12288) */
+#define VMMDEV_REQUESTOR_TRUST_HIGH                         0x00005000
+/* Requestor trust level: System (SID S-1-16-16384) */
+#define VMMDEV_REQUESTOR_TRUST_SYSTEM                       0x00006000
+/* Requestor trust level >= Protected (SID S-1-16-20480, S-1-16-28672) */
+#define VMMDEV_REQUESTOR_TRUST_PROTECTED                    0x00007000
+/* Requestor trust level mask */
+#define VMMDEV_REQUESTOR_TRUST_MASK                         0x00007000
+
+/* Requestor is using the less trusted user device node (/dev/vboxuser) */
+#define VMMDEV_REQUESTOR_USER_DEVICE                        0x00008000
+
 /** HGCM service location types. */
 enum vmmdev_hgcm_service_location_type {
        VMMDEV_HGCM_LOC_INVALID    = 0,
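
A hedged sketch of decoding a requestor word with the masks defined above, using kernel-style types; the sample value is illustrative (a user-mode request from root on the physical console):

    u32 requestor = VMMDEV_REQUESTOR_USR_ROOT |
                    VMMDEV_REQUESTOR_USERMODE |
                    VMMDEV_REQUESTOR_CON_YES;

    bool from_user  = (requestor & VMMDEV_REQUESTOR_MODE_MASK) ==
                      VMMDEV_REQUESTOR_USERMODE;
    bool on_console = (requestor & VMMDEV_REQUESTOR_CON_MASK) ==
                      VMMDEV_REQUESTOR_CON_YES;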
index 404d4b9ffe7644553a1b59fba043b151d935a2e9..df1153cea0b7ee2a27e19682837f81922fef353e 100644 (file)
@@ -32,6 +32,7 @@
 
 #ifndef __KERNEL__
 #include <stdlib.h>
+#include <time.h>
 #endif
 
 /*
index 0af05752969f1bc4641a591d717edd1b46a18b5a..095274a871f89b9d37f825d9190355a8592a10b3 100644 (file)
@@ -101,7 +101,6 @@ static const struct rhashtable_params ipc_kht_params = {
        .head_offset            = offsetof(struct kern_ipc_perm, khtnode),
        .key_offset             = offsetof(struct kern_ipc_perm, key),
        .key_len                = FIELD_SIZEOF(struct kern_ipc_perm, key),
-       .locks_mul              = 1,
        .automatic_shrinking    = true,
 };
 
index c72e0d8e1e657d03ef402a48f00c9ee906f1bf90..584636c9e2eb0e27f235990f5e1306877bb7aad1 100644 (file)
@@ -22,7 +22,7 @@
 #include "map_in_map.h"
 
 #define ARRAY_CREATE_FLAG_MASK \
-       (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
+       (BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)
 
 static void bpf_array_free_percpu(struct bpf_array *array)
 {
@@ -63,6 +63,7 @@ int array_map_alloc_check(union bpf_attr *attr)
        if (attr->max_entries == 0 || attr->key_size != 4 ||
            attr->value_size == 0 ||
            attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
+           !bpf_map_flags_access_ok(attr->map_flags) ||
            (percpu && numa_node != NUMA_NO_NODE))
                return -EINVAL;
 
@@ -160,6 +161,36 @@ static void *array_map_lookup_elem(struct bpf_map *map, void *key)
        return array->value + array->elem_size * (index & array->index_mask);
 }
 
+static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm,
+                                      u32 off)
+{
+       struct bpf_array *array = container_of(map, struct bpf_array, map);
+
+       if (map->max_entries != 1)
+               return -ENOTSUPP;
+       if (off >= map->value_size)
+               return -EINVAL;
+
+       *imm = (unsigned long)array->value;
+       return 0;
+}
+
+static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm,
+                                      u32 *off)
+{
+       struct bpf_array *array = container_of(map, struct bpf_array, map);
+       u64 base = (unsigned long)array->value;
+       u64 range = array->elem_size;
+
+       if (map->max_entries != 1)
+               return -ENOTSUPP;
+       if (imm < base || imm >= base + range)
+               return -ENOENT;
+
+       *off = imm - base;
+       return 0;
+}
+
 /* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
 static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
 {
@@ -360,7 +391,8 @@ static void array_map_seq_show_elem(struct bpf_map *map, void *key,
                return;
        }
 
-       seq_printf(m, "%u: ", *(u32 *)key);
+       if (map->btf_key_type_id)
+               seq_printf(m, "%u: ", *(u32 *)key);
        btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
        seq_puts(m, "\n");
 
@@ -397,6 +429,18 @@ static int array_map_check_btf(const struct bpf_map *map,
 {
        u32 int_data;
 
+       /* One exception for keyless BTF: .bss/.data/.rodata map */
+       if (btf_type_is_void(key_type)) {
+               if (map->map_type != BPF_MAP_TYPE_ARRAY ||
+                   map->max_entries != 1)
+                       return -EINVAL;
+
+               if (BTF_INFO_KIND(value_type->info) != BTF_KIND_DATASEC)
+                       return -EINVAL;
+
+               return 0;
+       }
+
        if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
                return -EINVAL;
 
@@ -419,6 +463,8 @@ const struct bpf_map_ops array_map_ops = {
        .map_update_elem = array_map_update_elem,
        .map_delete_elem = array_map_delete_elem,
        .map_gen_lookup = array_map_gen_lookup,
+       .map_direct_value_addr = array_map_direct_value_addr,
+       .map_direct_value_meta = array_map_direct_value_meta,
        .map_seq_show_elem = array_map_seq_show_elem,
        .map_check_btf = array_map_check_btf,
 };
@@ -440,6 +486,9 @@ static int fd_array_map_alloc_check(union bpf_attr *attr)
        /* only file descriptors can be stored in this type of map */
        if (attr->value_size != sizeof(u32))
                return -EINVAL;
+       /* Program read-only/write-only not supported for special maps yet. */
+       if (attr->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG))
+               return -EINVAL;
        return array_map_alloc_check(attr);
 }
 
index bd3921b1514b0b24bc9600f04190702badb4897f..cad09858a5f25beabb53aaaefeb1b4ad5a7a44f1 100644 (file)
             i < btf_type_vlen(struct_type);                            \
             i++, member++)
 
+#define for_each_vsi(i, struct_type, member)                   \
+       for (i = 0, member = btf_type_var_secinfo(struct_type); \
+            i < btf_type_vlen(struct_type);                    \
+            i++, member++)
+
+#define for_each_vsi_from(i, from, struct_type, member)                                \
+       for (i = from, member = btf_type_var_secinfo(struct_type) + from;       \
+            i < btf_type_vlen(struct_type);                                    \
+            i++, member++)
+
 static DEFINE_IDR(btf_idr);
 static DEFINE_SPINLOCK(btf_idr_lock);
 
@@ -262,6 +272,8 @@ static const char * const btf_kind_str[NR_BTF_KINDS] = {
        [BTF_KIND_RESTRICT]     = "RESTRICT",
        [BTF_KIND_FUNC]         = "FUNC",
        [BTF_KIND_FUNC_PROTO]   = "FUNC_PROTO",
+       [BTF_KIND_VAR]          = "VAR",
+       [BTF_KIND_DATASEC]      = "DATASEC",
 };
 
 struct btf_kind_operations {
@@ -314,7 +326,7 @@ static bool btf_type_is_modifier(const struct btf_type *t)
        return false;
 }
 
-static bool btf_type_is_void(const struct btf_type *t)
+bool btf_type_is_void(const struct btf_type *t)
 {
        return t == &btf_void;
 }
@@ -375,13 +387,36 @@ static bool btf_type_is_int(const struct btf_type *t)
        return BTF_INFO_KIND(t->info) == BTF_KIND_INT;
 }
 
+static bool btf_type_is_var(const struct btf_type *t)
+{
+       return BTF_INFO_KIND(t->info) == BTF_KIND_VAR;
+}
+
+static bool btf_type_is_datasec(const struct btf_type *t)
+{
+       return BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC;
+}
+
+/* Types that act only as a source, not sink or intermediate
+ * type when resolving.
+ */
+static bool btf_type_is_resolve_source_only(const struct btf_type *t)
+{
+       return btf_type_is_var(t) ||
+              btf_type_is_datasec(t);
+}
+
 /* What types need to be resolved?
  *
  * btf_type_is_modifier() is an obvious one.
  *
  * btf_type_is_struct() because its member refers to
  * another type (through member->type).
-
+ *
+ * btf_type_is_var() because the variable refers to
+ * another type. btf_type_is_datasec() holds multiple
+ * btf_type_is_var() types that need resolving.
+ *
  * btf_type_is_array() because its element (array->type)
  * refers to another type.  Array can be thought of a
  * special case of struct while array just has the same
@@ -390,9 +425,11 @@ static bool btf_type_is_int(const struct btf_type *t)
 static bool btf_type_needs_resolve(const struct btf_type *t)
 {
        return btf_type_is_modifier(t) ||
-               btf_type_is_ptr(t) ||
-               btf_type_is_struct(t) ||
-               btf_type_is_array(t);
+              btf_type_is_ptr(t) ||
+              btf_type_is_struct(t) ||
+              btf_type_is_array(t) ||
+              btf_type_is_var(t) ||
+              btf_type_is_datasec(t);
 }
 
 /* t->size can be used */
@@ -403,6 +440,7 @@ static bool btf_type_has_size(const struct btf_type *t)
        case BTF_KIND_STRUCT:
        case BTF_KIND_UNION:
        case BTF_KIND_ENUM:
+       case BTF_KIND_DATASEC:
                return true;
        }
 
@@ -467,6 +505,16 @@ static const struct btf_enum *btf_type_enum(const struct btf_type *t)
        return (const struct btf_enum *)(t + 1);
 }
 
+static const struct btf_var *btf_type_var(const struct btf_type *t)
+{
+       return (const struct btf_var *)(t + 1);
+}
+
+static const struct btf_var_secinfo *btf_type_var_secinfo(const struct btf_type *t)
+{
+       return (const struct btf_var_secinfo *)(t + 1);
+}
+
 static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
 {
        return kind_ops[BTF_INFO_KIND(t->info)];
@@ -478,23 +526,31 @@ static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
                offset < btf->hdr.str_len;
 }
 
-/* Only C-style identifier is permitted. This can be relaxed if
- * necessary.
- */
-static bool btf_name_valid_identifier(const struct btf *btf, u32 offset)
+static bool __btf_name_char_ok(char c, bool first, bool dot_ok)
+{
+       if ((first ? !isalpha(c) :
+                    !isalnum(c)) &&
+           c != '_' &&
+           ((c == '.' && !dot_ok) ||
+             c != '.'))
+               return false;
+       return true;
+}
+
+static bool __btf_name_valid(const struct btf *btf, u32 offset, bool dot_ok)
 {
        /* offset must be valid */
        const char *src = &btf->strings[offset];
        const char *src_limit;
 
-       if (!isalpha(*src) && *src != '_')
+       if (!__btf_name_char_ok(*src, true, dot_ok))
                return false;
 
        /* set a limit on identifier length */
        src_limit = src + KSYM_NAME_LEN;
        src++;
        while (*src && src < src_limit) {
-               if (!isalnum(*src) && *src != '_')
+               if (!__btf_name_char_ok(*src, false, dot_ok))
                        return false;
                src++;
        }
@@ -502,6 +558,19 @@ static bool btf_name_valid_identifier(const struct btf *btf, u32 offset)
        return !*src;
 }
 
+/* Only C-style identifier is permitted. This can be relaxed if
+ * necessary.
+ */
+static bool btf_name_valid_identifier(const struct btf *btf, u32 offset)
+{
+       return __btf_name_valid(btf, offset, false);
+}
+
+static bool btf_name_valid_section(const struct btf *btf, u32 offset)
+{
+       return __btf_name_valid(btf, offset, true);
+}
+
 static const char *__btf_name_by_offset(const struct btf *btf, u32 offset)
 {
        if (!offset)
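
Hedged examples of what the relaxed naming rule accepts, given the split into btf_name_valid_identifier() (dot_ok = false) and btf_name_valid_section() (dot_ok = true):

    /* ".bss"    -> identifier: false, section: true  (dots allowed)  */
    /* ".rodata" -> identifier: false, section: true                  */
    /* "my_var"  -> identifier: true,  section: true                  */
    /* "9data"   -> identifier: false, section: false (leading digit) */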
@@ -697,6 +766,32 @@ static void btf_verifier_log_member(struct btf_verifier_env *env,
        __btf_verifier_log(log, "\n");
 }
 
+__printf(4, 5)
+static void btf_verifier_log_vsi(struct btf_verifier_env *env,
+                                const struct btf_type *datasec_type,
+                                const struct btf_var_secinfo *vsi,
+                                const char *fmt, ...)
+{
+       struct bpf_verifier_log *log = &env->log;
+       va_list args;
+
+       if (!bpf_verifier_log_needed(log))
+               return;
+       if (env->phase != CHECK_META)
+               btf_verifier_log_type(env, datasec_type, NULL);
+
+       __btf_verifier_log(log, "\t type_id=%u offset=%u size=%u",
+                          vsi->type, vsi->offset, vsi->size);
+       if (fmt && *fmt) {
+               __btf_verifier_log(log, " ");
+               va_start(args, fmt);
+               bpf_verifier_vlog(log, fmt, args);
+               va_end(args);
+       }
+
+       __btf_verifier_log(log, "\n");
+}
+
 static void btf_verifier_log_hdr(struct btf_verifier_env *env,
                                 u32 btf_data_size)
 {
@@ -974,7 +1069,8 @@ const struct btf_type *btf_type_id_size(const struct btf *btf,
        } else if (btf_type_is_ptr(size_type)) {
                size = sizeof(void *);
        } else {
-               if (WARN_ON_ONCE(!btf_type_is_modifier(size_type)))
+               if (WARN_ON_ONCE(!btf_type_is_modifier(size_type) &&
+                                !btf_type_is_var(size_type)))
                        return NULL;
 
                size = btf->resolved_sizes[size_type_id];
@@ -1509,7 +1605,7 @@ static int btf_modifier_resolve(struct btf_verifier_env *env,
        u32 next_type_size = 0;
 
        next_type = btf_type_by_id(btf, next_type_id);
-       if (!next_type) {
+       if (!next_type || btf_type_is_resolve_source_only(next_type)) {
                btf_verifier_log_type(env, v->t, "Invalid type_id");
                return -EINVAL;
        }
@@ -1542,6 +1638,53 @@ static int btf_modifier_resolve(struct btf_verifier_env *env,
        return 0;
 }
 
+static int btf_var_resolve(struct btf_verifier_env *env,
+                          const struct resolve_vertex *v)
+{
+       const struct btf_type *next_type;
+       const struct btf_type *t = v->t;
+       u32 next_type_id = t->type;
+       struct btf *btf = env->btf;
+       u32 next_type_size;
+
+       next_type = btf_type_by_id(btf, next_type_id);
+       if (!next_type || btf_type_is_resolve_source_only(next_type)) {
+               btf_verifier_log_type(env, v->t, "Invalid type_id");
+               return -EINVAL;
+       }
+
+       if (!env_type_is_resolve_sink(env, next_type) &&
+           !env_type_is_resolved(env, next_type_id))
+               return env_stack_push(env, next_type, next_type_id);
+
+       if (btf_type_is_modifier(next_type)) {
+               const struct btf_type *resolved_type;
+               u32 resolved_type_id;
+
+               resolved_type_id = next_type_id;
+               resolved_type = btf_type_id_resolve(btf, &resolved_type_id);
+
+               if (btf_type_is_ptr(resolved_type) &&
+                   !env_type_is_resolve_sink(env, resolved_type) &&
+                   !env_type_is_resolved(env, resolved_type_id))
+                       return env_stack_push(env, resolved_type,
+                                             resolved_type_id);
+       }
+
+       /* We must resolve to something concrete at this point, no
+        * forward types or similar that would resolve to a size of
+        * zero are allowed.
+        */
+       if (!btf_type_id_size(btf, &next_type_id, &next_type_size)) {
+               btf_verifier_log_type(env, v->t, "Invalid type_id");
+               return -EINVAL;
+       }
+
+       env_stack_pop_resolved(env, next_type_id, next_type_size);
+
+       return 0;
+}
+
 static int btf_ptr_resolve(struct btf_verifier_env *env,
                           const struct resolve_vertex *v)
 {
@@ -1551,7 +1694,7 @@ static int btf_ptr_resolve(struct btf_verifier_env *env,
        struct btf *btf = env->btf;
 
        next_type = btf_type_by_id(btf, next_type_id);
-       if (!next_type) {
+       if (!next_type || btf_type_is_resolve_source_only(next_type)) {
                btf_verifier_log_type(env, v->t, "Invalid type_id");
                return -EINVAL;
        }
@@ -1609,6 +1752,15 @@ static void btf_modifier_seq_show(const struct btf *btf,
        btf_type_ops(t)->seq_show(btf, t, type_id, data, bits_offset, m);
 }
 
+static void btf_var_seq_show(const struct btf *btf, const struct btf_type *t,
+                            u32 type_id, void *data, u8 bits_offset,
+                            struct seq_file *m)
+{
+       t = btf_type_id_resolve(btf, &type_id);
+
+       btf_type_ops(t)->seq_show(btf, t, type_id, data, bits_offset, m);
+}
+
 static void btf_ptr_seq_show(const struct btf *btf, const struct btf_type *t,
                             u32 type_id, void *data, u8 bits_offset,
                             struct seq_file *m)
@@ -1776,7 +1928,8 @@ static int btf_array_resolve(struct btf_verifier_env *env,
        /* Check array->index_type */
        index_type_id = array->index_type;
        index_type = btf_type_by_id(btf, index_type_id);
-       if (btf_type_nosize_or_null(index_type)) {
+       if (btf_type_is_resolve_source_only(index_type) ||
+           btf_type_nosize_or_null(index_type)) {
                btf_verifier_log_type(env, v->t, "Invalid index");
                return -EINVAL;
        }
@@ -1795,7 +1948,8 @@ static int btf_array_resolve(struct btf_verifier_env *env,
        /* Check array->type */
        elem_type_id = array->type;
        elem_type = btf_type_by_id(btf, elem_type_id);
-       if (btf_type_nosize_or_null(elem_type)) {
+       if (btf_type_is_resolve_source_only(elem_type) ||
+           btf_type_nosize_or_null(elem_type)) {
                btf_verifier_log_type(env, v->t,
                                      "Invalid elem");
                return -EINVAL;
@@ -2016,7 +2170,8 @@ static int btf_struct_resolve(struct btf_verifier_env *env,
                const struct btf_type *member_type = btf_type_by_id(env->btf,
                                                                member_type_id);
 
-               if (btf_type_nosize_or_null(member_type)) {
+               if (btf_type_is_resolve_source_only(member_type) ||
+                   btf_type_nosize_or_null(member_type)) {
                        btf_verifier_log_member(env, v->t, member,
                                                "Invalid member");
                        return -EINVAL;
@@ -2411,6 +2566,222 @@ static struct btf_kind_operations func_ops = {
        .seq_show = btf_df_seq_show,
 };
 
+static s32 btf_var_check_meta(struct btf_verifier_env *env,
+                             const struct btf_type *t,
+                             u32 meta_left)
+{
+       const struct btf_var *var;
+       u32 meta_needed = sizeof(*var);
+
+       if (meta_left < meta_needed) {
+               btf_verifier_log_basic(env, t,
+                                      "meta_left:%u meta_needed:%u",
+                                      meta_left, meta_needed);
+               return -EINVAL;
+       }
+
+       if (btf_type_vlen(t)) {
+               btf_verifier_log_type(env, t, "vlen != 0");
+               return -EINVAL;
+       }
+
+       if (btf_type_kflag(t)) {
+               btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
+               return -EINVAL;
+       }
+
+       if (!t->name_off ||
+           !__btf_name_valid(env->btf, t->name_off, true)) {
+               btf_verifier_log_type(env, t, "Invalid name");
+               return -EINVAL;
+       }
+
+       /* A var cannot be in type void */
+       if (!t->type || !BTF_TYPE_ID_VALID(t->type)) {
+               btf_verifier_log_type(env, t, "Invalid type_id");
+               return -EINVAL;
+       }
+
+       var = btf_type_var(t);
+       if (var->linkage != BTF_VAR_STATIC &&
+           var->linkage != BTF_VAR_GLOBAL_ALLOCATED) {
+               btf_verifier_log_type(env, t, "Linkage not supported");
+               return -EINVAL;
+       }
+
+       btf_verifier_log_type(env, t, NULL);
+
+       return meta_needed;
+}
+
+static void btf_var_log(struct btf_verifier_env *env, const struct btf_type *t)
+{
+       const struct btf_var *var = btf_type_var(t);
+
+       btf_verifier_log(env, "type_id=%u linkage=%u", t->type, var->linkage);
+}
+
+static const struct btf_kind_operations var_ops = {
+       .check_meta             = btf_var_check_meta,
+       .resolve                = btf_var_resolve,
+       .check_member           = btf_df_check_member,
+       .check_kflag_member     = btf_df_check_kflag_member,
+       .log_details            = btf_var_log,
+       .seq_show               = btf_var_seq_show,
+};
+
+static s32 btf_datasec_check_meta(struct btf_verifier_env *env,
+                                 const struct btf_type *t,
+                                 u32 meta_left)
+{
+       const struct btf_var_secinfo *vsi;
+       u64 last_vsi_end_off = 0, sum = 0;
+       u32 i, meta_needed;
+
+       meta_needed = btf_type_vlen(t) * sizeof(*vsi);
+       if (meta_left < meta_needed) {
+               btf_verifier_log_basic(env, t,
+                                      "meta_left:%u meta_needed:%u",
+                                      meta_left, meta_needed);
+               return -EINVAL;
+       }
+
+       if (!btf_type_vlen(t)) {
+               btf_verifier_log_type(env, t, "vlen == 0");
+               return -EINVAL;
+       }
+
+       if (!t->size) {
+               btf_verifier_log_type(env, t, "size == 0");
+               return -EINVAL;
+       }
+
+       if (btf_type_kflag(t)) {
+               btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
+               return -EINVAL;
+       }
+
+       if (!t->name_off ||
+           !btf_name_valid_section(env->btf, t->name_off)) {
+               btf_verifier_log_type(env, t, "Invalid name");
+               return -EINVAL;
+       }
+
+       btf_verifier_log_type(env, t, NULL);
+
+       for_each_vsi(i, t, vsi) {
+               /* A var cannot be in type void */
+               if (!vsi->type || !BTF_TYPE_ID_VALID(vsi->type)) {
+                       btf_verifier_log_vsi(env, t, vsi,
+                                            "Invalid type_id");
+                       return -EINVAL;
+               }
+
+               if (vsi->offset < last_vsi_end_off || vsi->offset >= t->size) {
+                       btf_verifier_log_vsi(env, t, vsi,
+                                            "Invalid offset");
+                       return -EINVAL;
+               }
+
+               if (!vsi->size || vsi->size > t->size) {
+                       btf_verifier_log_vsi(env, t, vsi,
+                                            "Invalid size");
+                       return -EINVAL;
+               }
+
+               last_vsi_end_off = vsi->offset + vsi->size;
+               if (last_vsi_end_off > t->size) {
+                       btf_verifier_log_vsi(env, t, vsi,
+                                            "Invalid offset+size");
+                       return -EINVAL;
+               }
+
+               btf_verifier_log_vsi(env, t, vsi, NULL);
+               sum += vsi->size;
+       }
+
+       if (t->size < sum) {
+               btf_verifier_log_type(env, t, "Invalid btf_info size");
+               return -EINVAL;
+       }
+
+       return meta_needed;
+}
+
+static int btf_datasec_resolve(struct btf_verifier_env *env,
+                              const struct resolve_vertex *v)
+{
+       const struct btf_var_secinfo *vsi;
+       struct btf *btf = env->btf;
+       u16 i;
+
+       for_each_vsi_from(i, v->next_member, v->t, vsi) {
+               u32 var_type_id = vsi->type, type_id, type_size = 0;
+               const struct btf_type *var_type = btf_type_by_id(env->btf,
+                                                                var_type_id);
+               if (!var_type || !btf_type_is_var(var_type)) {
+                       btf_verifier_log_vsi(env, v->t, vsi,
+                                            "Not a VAR kind member");
+                       return -EINVAL;
+               }
+
+               if (!env_type_is_resolve_sink(env, var_type) &&
+                   !env_type_is_resolved(env, var_type_id)) {
+                       env_stack_set_next_member(env, i + 1);
+                       return env_stack_push(env, var_type, var_type_id);
+               }
+
+               type_id = var_type->type;
+               if (!btf_type_id_size(btf, &type_id, &type_size)) {
+                       btf_verifier_log_vsi(env, v->t, vsi, "Invalid type");
+                       return -EINVAL;
+               }
+
+               if (vsi->size < type_size) {
+                       btf_verifier_log_vsi(env, v->t, vsi, "Invalid size");
+                       return -EINVAL;
+               }
+       }
+
+       env_stack_pop_resolved(env, 0, 0);
+       return 0;
+}
+
+static void btf_datasec_log(struct btf_verifier_env *env,
+                           const struct btf_type *t)
+{
+       btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
+}
+
+static void btf_datasec_seq_show(const struct btf *btf,
+                                const struct btf_type *t, u32 type_id,
+                                void *data, u8 bits_offset,
+                                struct seq_file *m)
+{
+       const struct btf_var_secinfo *vsi;
+       const struct btf_type *var;
+       u32 i;
+
+       seq_printf(m, "section (\"%s\") = {", __btf_name_by_offset(btf, t->name_off));
+       for_each_vsi(i, t, vsi) {
+               var = btf_type_by_id(btf, vsi->type);
+               if (i)
+                       seq_puts(m, ",");
+               btf_type_ops(var)->seq_show(btf, var, vsi->type,
+                                           data + vsi->offset, bits_offset, m);
+       }
+       seq_puts(m, "}");
+}
+
+static const struct btf_kind_operations datasec_ops = {
+       .check_meta             = btf_datasec_check_meta,
+       .resolve                = btf_datasec_resolve,
+       .check_member           = btf_df_check_member,
+       .check_kflag_member     = btf_df_check_kflag_member,
+       .log_details            = btf_datasec_log,
+       .seq_show               = btf_datasec_seq_show,
+};
+
 static int btf_func_proto_check(struct btf_verifier_env *env,
                                const struct btf_type *t)
 {
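
The two kinds added above teach BTF about global variables: BTF_KIND_VAR describes a variable and its linkage, while BTF_KIND_DATASEC describes the ELF section holding a group of them. The secinfo loop in btf_datasec_check_meta() requires the (offset, size) entries to be non-empty, laid out in ascending order without overlap, and fully contained in the section. A standalone restatement of that invariant (not part of this diff; illustrative types):

#include <stdint.h>
#include <stdio.h>

struct secinfo { uint32_t offset, size; };

static int datasec_entries_valid(const struct secinfo *vsi, uint32_t vlen,
                                 uint32_t sec_size)
{
        uint64_t last_end = 0;
        uint32_t i;

        for (i = 0; i < vlen; i++) {
                if (vsi[i].offset < last_end || vsi[i].offset >= sec_size)
                        return 0;       /* overlap, or starts past the end */
                if (!vsi[i].size || vsi[i].size > sec_size)
                        return 0;       /* empty or oversized variable */
                last_end = (uint64_t)vsi[i].offset + vsi[i].size;
                if (last_end > sec_size)
                        return 0;       /* offset+size spills out */
        }
        return 1;
}

int main(void)
{
        struct secinfo ok[] = { { 0, 4 }, { 8, 8 } };
        struct secinfo bad[] = { { 0, 8 }, { 4, 4 } };

        printf("%d %d\n", datasec_entries_valid(ok, 2, 16),
               datasec_entries_valid(bad, 2, 16));     /* prints: 1 0 */
        return 0;
}
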
@@ -2542,6 +2913,8 @@ static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = {
        [BTF_KIND_RESTRICT] = &modifier_ops,
        [BTF_KIND_FUNC] = &func_ops,
        [BTF_KIND_FUNC_PROTO] = &func_proto_ops,
+       [BTF_KIND_VAR] = &var_ops,
+       [BTF_KIND_DATASEC] = &datasec_ops,
 };
 
 static s32 btf_check_meta(struct btf_verifier_env *env,
@@ -2622,13 +2995,17 @@ static bool btf_resolve_valid(struct btf_verifier_env *env,
        if (!env_type_is_resolved(env, type_id))
                return false;
 
-       if (btf_type_is_struct(t))
+       if (btf_type_is_struct(t) || btf_type_is_datasec(t))
                return !btf->resolved_ids[type_id] &&
-                       !btf->resolved_sizes[type_id];
+                      !btf->resolved_sizes[type_id];
 
-       if (btf_type_is_modifier(t) || btf_type_is_ptr(t)) {
+       if (btf_type_is_modifier(t) || btf_type_is_ptr(t) ||
+           btf_type_is_var(t)) {
                t = btf_type_id_resolve(btf, &type_id);
-               return t && !btf_type_is_modifier(t);
+               return t &&
+                      !btf_type_is_modifier(t) &&
+                      !btf_type_is_var(t) &&
+                      !btf_type_is_datasec(t);
        }
 
        if (btf_type_is_array(t)) {
index ff09d32a8a1be210e88a0e6f7f14596ee6b89f06..ace8c22c8b0e9b1819947aac5b10592822d3a290 100644 (file)
@@ -292,7 +292,8 @@ int bpf_prog_calc_tag(struct bpf_prog *fp)
                dst[i] = fp->insnsi[i];
                if (!was_ld_map &&
                    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
-                   dst[i].src_reg == BPF_PSEUDO_MAP_FD) {
+                   (dst[i].src_reg == BPF_PSEUDO_MAP_FD ||
+                    dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) {
                        was_ld_map = true;
                        dst[i].imm = 0;
                } else if (was_ld_map &&
@@ -438,6 +439,7 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
        u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
        const u32 cnt_max = S16_MAX;
        struct bpf_prog *prog_adj;
+       int err;
 
        /* Since our patchlet doesn't expand the image, we're done. */
        if (insn_delta == 0) {
@@ -453,8 +455,8 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
         * we afterwards may not fail anymore.
         */
        if (insn_adj_cnt > cnt_max &&
-           bpf_adj_branches(prog, off, off + 1, off + len, true))
-               return NULL;
+           (err = bpf_adj_branches(prog, off, off + 1, off + len, true)))
+               return ERR_PTR(err);
 
        /* Several new instructions need to be inserted. Make room
         * for them. Likely, there's no need for a new allocation as
@@ -463,7 +465,7 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
        prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
                                    GFP_USER);
        if (!prog_adj)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        prog_adj->len = insn_adj_cnt;
 
@@ -1096,13 +1098,13 @@ struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
                        continue;
 
                tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
-               if (!tmp) {
+               if (IS_ERR(tmp)) {
                        /* Patching may have repointed aux->prog during
                         * realloc from the original one, so we need to
                         * fix it up here on error.
                         */
                        bpf_jit_prog_release_other(prog, clone);
-                       return ERR_PTR(-ENOMEM);
+                       return tmp;
                }
 
                clone = tmp;
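
The hunks above, apparently from kernel/bpf/core.c, switch bpf_patch_insn_single() from returning NULL on every failure to ERR_PTR(), so bpf_jit_blind_constants() here (and bpf_patch_insn_data() in the verifier further down) can tell the -ERANGE branch-offset overflow apart from plain -ENOMEM. For readers unfamiliar with the idiom, a userspace sketch of the kernel's pointer-error encoding (MAX_ERRNO matches the kernel's value):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* hypothetical stand-in for bpf_patch_insn_single()'s two failure modes */
static void *patch(int branch_offset_overflow)
{
        return branch_offset_overflow ? ERR_PTR(-ERANGE) : ERR_PTR(-ENOMEM);
}

int main(void)
{
        void *ret = patch(1);

        if (IS_ERR(ret))
                printf("patch failed: %ld\n", PTR_ERR(ret)); /* -34, ERANGE */
        return 0;
}
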
index 8974b3755670e37b0540f3d48ee0b29da951a341..3c18260403dde1df951448c600b6ef9ac61f5635 100644 (file)
@@ -162,10 +162,14 @@ static void cpu_map_kthread_stop(struct work_struct *work)
 static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu,
                                         struct xdp_frame *xdpf)
 {
+       unsigned int hard_start_headroom;
        unsigned int frame_size;
        void *pkt_data_start;
        struct sk_buff *skb;
 
+       /* Part of the headroom was reserved for xdpf */
+       hard_start_headroom = sizeof(struct xdp_frame) + xdpf->headroom;
+
        /* build_skb need to place skb_shared_info after SKB end, and
         * also want to know the memory "truesize".  Thus, need to
         * know the memory frame size backing xdp_buff.
@@ -183,15 +187,15 @@ static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu,
         * is not at a fixed memory location, with mixed length
         * packets, which is bad for cache-line hotness.
         */
-       frame_size = SKB_DATA_ALIGN(xdpf->len + xdpf->headroom) +
+       frame_size = SKB_DATA_ALIGN(xdpf->len + hard_start_headroom) +
                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
-       pkt_data_start = xdpf->data - xdpf->headroom;
+       pkt_data_start = xdpf->data - hard_start_headroom;
        skb = build_skb(pkt_data_start, frame_size);
        if (!skb)
                return NULL;
 
-       skb_reserve(skb, xdpf->headroom);
+       skb_reserve(skb, hard_start_headroom);
        __skb_put(skb, xdpf->len);
        if (xdpf->metasize)
                skb_metadata_set(skb, xdpf->metasize);
@@ -205,6 +209,9 @@ static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu,
         * - RX ring dev queue index    (skb_record_rx_queue)
         */
 
+       /* Allow SKB to reuse area used by xdp_frame */
+       xdp_scrub_frame(xdpf);
+
        return skb;
 }
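
This cpumap fix accounts for the struct xdp_frame that sits at the very start of the buffer's headroom: the skb is now built from the true hard start, reserving sizeof(struct xdp_frame) + xdpf->headroom, and xdp_scrub_frame() releases the frame area for skb reuse. A quick arithmetic sketch of the layout, with hypothetical sizes:

#include <stdio.h>

int main(void)
{
        unsigned int frame_sz = 40;   /* stand-in for sizeof(struct xdp_frame) */
        unsigned int headroom = 192;  /* xdpf->headroom, located after the frame */
        unsigned int len = 1400;      /* xdpf->len */

        unsigned int hard_start_headroom = frame_sz + headroom;

        /* build_skb() must own the buffer from its true start so that
         * truesize and the skb_shared_info placement match the memory
         * frame actually backing the packet. */
        printf("reserve %u bytes; payload occupies [%u, %u)\n",
               hard_start_headroom, hard_start_headroom,
               hard_start_headroom + len);
        return 0;
}
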
 
index de73f55e42fd4922ae04c6d4384b93937ef91cff..d9ce383c0f9ce24d9be2546cad822bcb52d0e719 100644 (file)
@@ -205,10 +205,11 @@ void print_bpf_insn(const struct bpf_insn_cbs *cbs,
                         * part of the ldimm64 insn is accessible.
                         */
                        u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
-                       bool map_ptr = insn->src_reg == BPF_PSEUDO_MAP_FD;
+                       bool is_ptr = insn->src_reg == BPF_PSEUDO_MAP_FD ||
+                                     insn->src_reg == BPF_PSEUDO_MAP_VALUE;
                        char tmp[64];
 
-                       if (map_ptr && !allow_ptr_leaks)
+                       if (is_ptr && !allow_ptr_leaks)
                                imm = 0;
 
                        verbose(cbs->private_data, "(%02x) r%d = %s\n",
index fed15cf94dca65431f7ee233514d80d24cb5420b..192d32e77db3f05c9507ef43592e70d6db1df14b 100644 (file)
@@ -23,7 +23,7 @@
 
 #define HTAB_CREATE_FLAG_MASK                                          \
        (BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU | BPF_F_NUMA_NODE |    \
-        BPF_F_RDONLY | BPF_F_WRONLY | BPF_F_ZERO_SEED)
+        BPF_F_ACCESS_MASK | BPF_F_ZERO_SEED)
 
 struct bucket {
        struct hlist_nulls_head head;
@@ -262,8 +262,8 @@ static int htab_map_alloc_check(union bpf_attr *attr)
                /* Guard against local DoS, and discourage production use. */
                return -EPERM;
 
-       if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK)
-               /* reserved bits should not be used */
+       if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK ||
+           !bpf_map_flags_access_ok(attr->map_flags))
                return -EINVAL;
 
        if (!lru && percpu_lru)
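
BPF_F_ACCESS_MASK folds all four access flags (BPF_F_RDONLY/BPF_F_WRONLY for the fd, plus the new BPF_F_RDONLY_PROG/BPF_F_WRONLY_PROG for program-side access) into the per-map-type create masks, while bpf_map_flags_access_ok() rejects the one combination that makes no sense. A sketch of that helper as this series defines it; the flag values follow the uapi and the demo main() is illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BPF_F_RDONLY            (1U << 3)
#define BPF_F_WRONLY            (1U << 4)
#define BPF_F_RDONLY_PROG       (1U << 7)
#define BPF_F_WRONLY_PROG       (1U << 8)
#define BPF_F_ACCESS_MASK       (BPF_F_RDONLY | BPF_F_RDONLY_PROG | \
                                 BPF_F_WRONLY | BPF_F_WRONLY_PROG)

static bool bpf_map_flags_access_ok(uint32_t access_flags)
{
        /* a map may not be both read-only and write-only to programs */
        return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) !=
               (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
}

int main(void)
{
        printf("%d\n", bpf_map_flags_access_ok(BPF_F_RDONLY_PROG)); /* 1 */
        printf("%d\n", bpf_map_flags_access_ok(BPF_F_RDONLY_PROG |
                                               BPF_F_WRONLY_PROG)); /* 0 */
        return 0;
}
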
index 2ada5e21dfa62175d6cf9667ed4636e4c4ada659..4a8f390a2b821db8cff26f9716952b36013ab152 100644 (file)
@@ -554,19 +554,6 @@ struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type typ
 }
 EXPORT_SYMBOL(bpf_prog_get_type_path);
 
-static void bpf_evict_inode(struct inode *inode)
-{
-       enum bpf_type type;
-
-       truncate_inode_pages_final(&inode->i_data);
-       clear_inode(inode);
-
-       if (S_ISLNK(inode->i_mode))
-               kfree(inode->i_link);
-       if (!bpf_inode_type(inode, &type))
-               bpf_any_put(inode->i_private, type);
-}
-
 /*
  * Display the mount options in /proc/mounts.
  */
@@ -579,11 +566,28 @@ static int bpf_show_options(struct seq_file *m, struct dentry *root)
        return 0;
 }
 
+static void bpf_destroy_inode_deferred(struct rcu_head *head)
+{
+       struct inode *inode = container_of(head, struct inode, i_rcu);
+       enum bpf_type type;
+
+       if (S_ISLNK(inode->i_mode))
+               kfree(inode->i_link);
+       if (!bpf_inode_type(inode, &type))
+               bpf_any_put(inode->i_private, type);
+       free_inode_nonrcu(inode);
+}
+
+static void bpf_destroy_inode(struct inode *inode)
+{
+       call_rcu(&inode->i_rcu, bpf_destroy_inode_deferred);
+}
+
 static const struct super_operations bpf_super_ops = {
        .statfs         = simple_statfs,
        .drop_inode     = generic_delete_inode,
        .show_options   = bpf_show_options,
-       .evict_inode    = bpf_evict_inode,
+       .destroy_inode  = bpf_destroy_inode,
 };
 
 enum {
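
The inode.c change replaces eviction-time teardown with bpf_destroy_inode(), deferring the real work past an RCU grace period so that lockless RCU path walks cannot dereference a just-freed symlink body or prog/map reference; the callback recovers the inode from its embedded rcu_head via container_of(). A standalone illustration of that recovery idiom, RCU machinery elided, types hypothetical:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head { struct rcu_head *next; };
struct fake_inode { int ino; struct rcu_head i_rcu; };

static void destroy_deferred(struct rcu_head *head)
{
        struct fake_inode *inode =
                container_of(head, struct fake_inode, i_rcu);

        printf("freeing inode %d after the grace period\n", inode->ino);
}

int main(void)
{
        struct fake_inode inode = { .ino = 42 };

        destroy_deferred(&inode.i_rcu); /* normally queued via call_rcu() */
        return 0;
}
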
index 6b572e2de7fbee76434bb1a3d723394d7e0b3679..980e8f1f6cb5d0d53889714a953ca1a2abaa0462 100644 (file)
@@ -14,7 +14,7 @@ DEFINE_PER_CPU(struct bpf_cgroup_storage*, bpf_cgroup_storage[MAX_BPF_CGROUP_STO
 #ifdef CONFIG_CGROUP_BPF
 
 #define LOCAL_STORAGE_CREATE_FLAG_MASK                                 \
-       (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
+       (BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)
 
 struct bpf_cgroup_storage_map {
        struct bpf_map map;
@@ -282,8 +282,8 @@ static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
        if (attr->value_size > PAGE_SIZE)
                return ERR_PTR(-E2BIG);
 
-       if (attr->map_flags & ~LOCAL_STORAGE_CREATE_FLAG_MASK)
-               /* reserved bits should not be used */
+       if (attr->map_flags & ~LOCAL_STORAGE_CREATE_FLAG_MASK ||
+           !bpf_map_flags_access_ok(attr->map_flags))
                return ERR_PTR(-EINVAL);
 
        if (attr->max_entries)
index 93a5cbbde421c346e72b10cd56e04a13888d9e7b..e61630c2e50b28a3e342c4684b9be8e8141784a9 100644 (file)
@@ -538,7 +538,7 @@ static int trie_delete_elem(struct bpf_map *map, void *_key)
 #define LPM_KEY_SIZE_MIN       LPM_KEY_SIZE(LPM_DATA_SIZE_MIN)
 
 #define LPM_CREATE_FLAG_MASK   (BPF_F_NO_PREALLOC | BPF_F_NUMA_NODE |  \
-                                BPF_F_RDONLY | BPF_F_WRONLY)
+                                BPF_F_ACCESS_MASK)
 
 static struct bpf_map *trie_alloc(union bpf_attr *attr)
 {
@@ -553,6 +553,7 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr)
        if (attr->max_entries == 0 ||
            !(attr->map_flags & BPF_F_NO_PREALLOC) ||
            attr->map_flags & ~LPM_CREATE_FLAG_MASK ||
+           !bpf_map_flags_access_ok(attr->map_flags) ||
            attr->key_size < LPM_KEY_SIZE_MIN ||
            attr->key_size > LPM_KEY_SIZE_MAX ||
            attr->value_size < LPM_VAL_SIZE_MIN ||
index b384ea9f3254987f1caa16dff0780900720d93ca..0b140d2368896d2d154bc0d0fa90b82aa238061c 100644 (file)
@@ -11,8 +11,7 @@
 #include "percpu_freelist.h"
 
 #define QUEUE_STACK_CREATE_FLAG_MASK \
-       (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
-
+       (BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)
 
 struct bpf_queue_stack {
        struct bpf_map map;
@@ -52,7 +51,8 @@ static int queue_stack_map_alloc_check(union bpf_attr *attr)
        /* check sanity of attributes */
        if (attr->max_entries == 0 || attr->key_size != 0 ||
            attr->value_size == 0 ||
-           attr->map_flags & ~QUEUE_STACK_CREATE_FLAG_MASK)
+           attr->map_flags & ~QUEUE_STACK_CREATE_FLAG_MASK ||
+           !bpf_map_flags_access_ok(attr->map_flags))
                return -EINVAL;
 
        if (attr->value_size > KMALLOC_MAX_SIZE)
index afca36f53c492718820ecacdb588af585dbb50e4..d995eedfdd1666d16653d179587f4b1d19da1af0 100644 (file)
@@ -166,13 +166,25 @@ void bpf_map_area_free(void *area)
        kvfree(area);
 }
 
+static u32 bpf_map_flags_retain_permanent(u32 flags)
+{
+       /* Some map creation flags are not tied to the map object but
+        * rather to the map fd, so they have no meaning upon map
+        * object inspection since multiple file descriptors with
+        * different (access) properties can exist here. Thus, given
+        * this has zero meaning for the map itself, let's clear these
+        * from here.
+        */
+       return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY);
+}
+
 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
 {
        map->map_type = attr->map_type;
        map->key_size = attr->key_size;
        map->value_size = attr->value_size;
        map->max_entries = attr->max_entries;
-       map->map_flags = attr->map_flags;
+       map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags);
        map->numa_node = bpf_map_attr_numa_node(attr);
 }
 
@@ -343,6 +355,18 @@ static int bpf_map_release(struct inode *inode, struct file *filp)
        return 0;
 }
 
+static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
+{
+       fmode_t mode = f.file->f_mode;
+
+       /* Our file permissions may have been overridden by global
+        * map permissions facing syscall side.
+        */
+       if (READ_ONCE(map->frozen))
+               mode &= ~FMODE_CAN_WRITE;
+       return mode;
+}
+
 #ifdef CONFIG_PROC_FS
 static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
 {
@@ -364,14 +388,16 @@ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
                   "max_entries:\t%u\n"
                   "map_flags:\t%#x\n"
                   "memlock:\t%llu\n"
-                  "map_id:\t%u\n",
+                  "map_id:\t%u\n"
+                  "frozen:\t%u\n",
                   map->map_type,
                   map->key_size,
                   map->value_size,
                   map->max_entries,
                   map->map_flags,
                   map->pages * 1ULL << PAGE_SHIFT,
-                  map->id);
+                  map->id,
+                  READ_ONCE(map->frozen));
 
        if (owner_prog_type) {
                seq_printf(m, "owner_prog_type:\t%u\n",
@@ -448,10 +474,10 @@ static int bpf_obj_name_cpy(char *dst, const char *src)
        const char *end = src + BPF_OBJ_NAME_LEN;
 
        memset(dst, 0, BPF_OBJ_NAME_LEN);
-
-       /* Copy all isalnum() and '_' char */
+       /* Copy all isalnum(), '_' and '.' chars. */
        while (src < end && *src) {
-               if (!isalnum(*src) && *src != '_')
+               if (!isalnum(*src) &&
+                   *src != '_' && *src != '.')
                        return -EINVAL;
                *dst++ = *src++;
        }
@@ -478,9 +504,16 @@ static int map_check_btf(struct bpf_map *map, const struct btf *btf,
        u32 key_size, value_size;
        int ret = 0;
 
-       key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
-       if (!key_type || key_size != map->key_size)
-               return -EINVAL;
+       /* Some maps allow key to be unspecified. */
+       if (btf_key_id) {
+               key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
+               if (!key_type || key_size != map->key_size)
+                       return -EINVAL;
+       } else {
+               key_type = btf_type_by_id(btf, 0);
+               if (!map->ops->map_check_btf)
+                       return -EINVAL;
+       }
 
        value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
        if (!value_type || value_size != map->value_size)
@@ -489,6 +522,8 @@ static int map_check_btf(struct bpf_map *map, const struct btf *btf,
        map->spin_lock_off = btf_find_spin_lock(btf, value_type);
 
        if (map_value_has_spin_lock(map)) {
+               if (map->map_flags & BPF_F_RDONLY_PROG)
+                       return -EACCES;
                if (map->map_type != BPF_MAP_TYPE_HASH &&
                    map->map_type != BPF_MAP_TYPE_ARRAY &&
                    map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE)
@@ -545,7 +580,7 @@ static int map_create(union bpf_attr *attr)
        if (attr->btf_key_type_id || attr->btf_value_type_id) {
                struct btf *btf;
 
-               if (!attr->btf_key_type_id || !attr->btf_value_type_id) {
+               if (!attr->btf_value_type_id) {
                        err = -EINVAL;
                        goto free_map_nouncharge;
                }
@@ -713,8 +748,7 @@ static int map_lookup_elem(union bpf_attr *attr)
        map = __bpf_map_get(f);
        if (IS_ERR(map))
                return PTR_ERR(map);
-
-       if (!(f.file->f_mode & FMODE_CAN_READ)) {
+       if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
                err = -EPERM;
                goto err_put;
        }
@@ -843,8 +877,7 @@ static int map_update_elem(union bpf_attr *attr)
        map = __bpf_map_get(f);
        if (IS_ERR(map))
                return PTR_ERR(map);
-
-       if (!(f.file->f_mode & FMODE_CAN_WRITE)) {
+       if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
                err = -EPERM;
                goto err_put;
        }
@@ -955,8 +988,7 @@ static int map_delete_elem(union bpf_attr *attr)
        map = __bpf_map_get(f);
        if (IS_ERR(map))
                return PTR_ERR(map);
-
-       if (!(f.file->f_mode & FMODE_CAN_WRITE)) {
+       if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
                err = -EPERM;
                goto err_put;
        }
@@ -1007,8 +1039,7 @@ static int map_get_next_key(union bpf_attr *attr)
        map = __bpf_map_get(f);
        if (IS_ERR(map))
                return PTR_ERR(map);
-
-       if (!(f.file->f_mode & FMODE_CAN_READ)) {
+       if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
                err = -EPERM;
                goto err_put;
        }
@@ -1075,8 +1106,7 @@ static int map_lookup_and_delete_elem(union bpf_attr *attr)
        map = __bpf_map_get(f);
        if (IS_ERR(map))
                return PTR_ERR(map);
-
-       if (!(f.file->f_mode & FMODE_CAN_WRITE)) {
+       if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
                err = -EPERM;
                goto err_put;
        }
@@ -1118,6 +1148,36 @@ static int map_lookup_and_delete_elem(union bpf_attr *attr)
        return err;
 }
 
+#define BPF_MAP_FREEZE_LAST_FIELD map_fd
+
+static int map_freeze(const union bpf_attr *attr)
+{
+       int err = 0, ufd = attr->map_fd;
+       struct bpf_map *map;
+       struct fd f;
+
+       if (CHECK_ATTR(BPF_MAP_FREEZE))
+               return -EINVAL;
+
+       f = fdget(ufd);
+       map = __bpf_map_get(f);
+       if (IS_ERR(map))
+               return PTR_ERR(map);
+       if (READ_ONCE(map->frozen)) {
+               err = -EBUSY;
+               goto err_put;
+       }
+       if (!capable(CAP_SYS_ADMIN)) {
+               err = -EPERM;
+               goto err_put;
+       }
+
+       WRITE_ONCE(map->frozen, true);
+err_put:
+       fdput(f);
+       return err;
+}
+
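
BPF_MAP_FREEZE is dispatched from the syscall switch further down: once map->frozen is set, map_get_sys_perms() above masks off FMODE_CAN_WRITE, so syscall-side writes fail with -EPERM while programs may still update the map unless it was also created with BPF_F_RDONLY_PROG. A second freeze returns -EBUSY, and the command requires CAP_SYS_ADMIN. A minimal userspace sketch, assuming uapi headers new enough to carry the command:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int map_freeze_fd(int map_fd)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.map_fd = map_fd;
        return syscall(__NR_bpf, BPF_MAP_FREEZE, &attr, sizeof(attr));
}

int main(void)
{
        if (map_freeze_fd(-1) < 0)      /* EBADF here, lacking a real map */
                perror("BPF_MAP_FREEZE");
        return 0;
}
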
 static const struct bpf_prog_ops * const bpf_prog_types[] = {
 #define BPF_PROG_TYPE(_id, _name) \
        [_id] = & _name ## _prog_ops,
@@ -1557,7 +1617,8 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
        /* eBPF programs must be GPL compatible to use GPL-ed functions */
        is_gpl = license_is_gpl_compatible(license);
 
-       if (attr->insn_cnt == 0 || attr->insn_cnt > BPF_MAXINSNS)
+       if (attr->insn_cnt == 0 ||
+           attr->insn_cnt > (capable(CAP_SYS_ADMIN) ? BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS))
                return -E2BIG;
        if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
            type != BPF_PROG_TYPE_CGROUP_SKB &&
@@ -1948,7 +2009,7 @@ static int bpf_prog_query(const union bpf_attr *attr,
        return cgroup_bpf_prog_query(attr, uattr);
 }
 
-#define BPF_PROG_TEST_RUN_LAST_FIELD test.duration
+#define BPF_PROG_TEST_RUN_LAST_FIELD test.ctx_out
 
 static int bpf_prog_test_run(const union bpf_attr *attr,
                             union bpf_attr __user *uattr)
@@ -1961,6 +2022,14 @@ static int bpf_prog_test_run(const union bpf_attr *attr,
        if (CHECK_ATTR(BPF_PROG_TEST_RUN))
                return -EINVAL;
 
+       if ((attr->test.ctx_size_in && !attr->test.ctx_in) ||
+           (!attr->test.ctx_size_in && attr->test.ctx_in))
+               return -EINVAL;
+
+       if ((attr->test.ctx_size_out && !attr->test.ctx_out) ||
+           (!attr->test.ctx_size_out && attr->test.ctx_out))
+               return -EINVAL;
+
        prog = bpf_prog_get(attr->test.prog_fd);
        if (IS_ERR(prog))
                return PTR_ERR(prog);
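
BPF_PROG_TEST_RUN grows ctx_in/ctx_out next to data_in/data_out, and the checks above require each pointer to be paired with its size. A hypothetical userspace helper filling the new fields; names follow this series' uapi, packet and context contents are up to the caller:

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int prog_test_run_ctx(int prog_fd, void *pkt, __u32 pkt_len,
                             void *ctx, __u32 ctx_len)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.test.prog_fd      = prog_fd;
        attr.test.data_in      = (unsigned long)pkt;
        attr.test.data_size_in = pkt_len;
        attr.test.ctx_in       = (unsigned long)ctx; /* pointer and size ... */
        attr.test.ctx_size_in  = ctx_len;            /* ... must come paired */
        return syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr));
}
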
@@ -2071,13 +2140,26 @@ static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
 }
 
 static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
-                                             unsigned long addr)
+                                             unsigned long addr, u32 *off,
+                                             u32 *type)
 {
+       const struct bpf_map *map;
        int i;
 
-       for (i = 0; i < prog->aux->used_map_cnt; i++)
-               if (prog->aux->used_maps[i] == (void *)addr)
-                       return prog->aux->used_maps[i];
+       for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) {
+               map = prog->aux->used_maps[i];
+               if (map == (void *)addr) {
+                       *type = BPF_PSEUDO_MAP_FD;
+                       return map;
+               }
+               if (!map->ops->map_direct_value_meta)
+                       continue;
+               if (!map->ops->map_direct_value_meta(map, addr, off)) {
+                       *type = BPF_PSEUDO_MAP_VALUE;
+                       return map;
+               }
+       }
+
        return NULL;
 }
 
@@ -2085,6 +2167,7 @@ static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog)
 {
        const struct bpf_map *map;
        struct bpf_insn *insns;
+       u32 off, type;
        u64 imm;
        int i;
 
@@ -2112,11 +2195,11 @@ static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog)
                        continue;
 
                imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
-               map = bpf_map_from_imm(prog, imm);
+               map = bpf_map_from_imm(prog, imm, &off, &type);
                if (map) {
-                       insns[i].src_reg = BPF_PSEUDO_MAP_FD;
+                       insns[i].src_reg = type;
                        insns[i].imm = map->id;
-                       insns[i + 1].imm = 0;
+                       insns[i + 1].imm = off;
                        continue;
                }
        }
@@ -2706,6 +2789,9 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
        case BPF_MAP_GET_NEXT_KEY:
                err = map_get_next_key(&attr);
                break;
+       case BPF_MAP_FREEZE:
+               err = map_freeze(&attr);
+               break;
        case BPF_PROG_LOAD:
                err = bpf_prog_load(&attr, uattr);
                break;
index 2fe89138309a6cbc38a8507cbfbf42a1f8566119..f25b7c9c20ba3a92b431e12ac3a34a52001e14b5 100644 (file)
@@ -176,7 +176,6 @@ struct bpf_verifier_stack_elem {
        struct bpf_verifier_stack_elem *next;
 };
 
-#define BPF_COMPLEXITY_LIMIT_INSNS     131072
 #define BPF_COMPLEXITY_LIMIT_STACK     1024
 #define BPF_COMPLEXITY_LIMIT_STATES    64
 
@@ -1092,7 +1091,7 @@ static int check_subprogs(struct bpf_verifier_env *env)
         */
        subprog[env->subprog_cnt].start = insn_cnt;
 
-       if (env->log.level > 1)
+       if (env->log.level & BPF_LOG_LEVEL2)
                for (i = 0; i < env->subprog_cnt; i++)
                        verbose(env, "func#%d @%d\n", i, subprog[i].start);
 
@@ -1139,6 +1138,7 @@ static int mark_reg_read(struct bpf_verifier_env *env,
                         struct bpf_reg_state *parent)
 {
        bool writes = parent == state->parent; /* Observe write marks */
+       int cnt = 0;
 
        while (parent) {
                /* if read wasn't screened by an earlier write ... */
@@ -1150,12 +1150,25 @@ static int mark_reg_read(struct bpf_verifier_env *env,
                                parent->var_off.value, parent->off);
                        return -EFAULT;
                }
+               if (parent->live & REG_LIVE_READ)
+                       /* The parentage chain never changes and
+                        * this parent was already marked as LIVE_READ.
+                        * There is no need to keep walking the chain again and
+                        * keep re-marking all parents as LIVE_READ.
+                        * This case happens when the same register is read
+                        * multiple times without writes into it in-between.
+                        */
+                       break;
                /* ... then we depend on parent's value */
                parent->live |= REG_LIVE_READ;
                state = parent;
                parent = state->parent;
                writes = true;
+               cnt++;
        }
+
+       if (env->longest_mark_read_walk < cnt)
+               env->longest_mark_read_walk = cnt;
        return 0;
 }
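
The early break added to mark_reg_read() exploits the fact that REG_LIVE_READ is monotonic: once an ancestor in the parentage chain carries the mark, everything above it was marked by an earlier walk, so repeated propagation for the same register drops from O(chain length) to amortized O(1). The new longest_mark_read_walk counter records the worst walk for the stats printed at the end of verification. The generic form of the pattern, with illustrative types:

#include <stdio.h>

struct node { struct node *parent; int live_read; };

static int mark_read(struct node *n)
{
        struct node *p;
        int cnt = 0;

        for (p = n->parent; p; p = p->parent) {
                if (p->live_read)
                        break;          /* chain above is already marked */
                p->live_read = 1;
                cnt++;
        }
        return cnt;                     /* length of the walk performed */
}

int main(void)
{
        struct node a = { 0, 0 }, b = { &a, 0 }, c = { &b, 0 };

        printf("%d %d\n", mark_read(&c), mark_read(&c)); /* prints: 2 0 */
        return 0;
}
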
 
@@ -1413,7 +1426,7 @@ static int check_stack_access(struct bpf_verifier_env *env,
                char tn_buf[48];
 
                tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
-               verbose(env, "variable stack access var_off=%s off=%d size=%d",
+               verbose(env, "variable stack access var_off=%s off=%d size=%d\n",
                        tn_buf, off, size);
                return -EACCES;
        }
@@ -1426,6 +1439,28 @@ static int check_stack_access(struct bpf_verifier_env *env,
        return 0;
 }
 
+static int check_map_access_type(struct bpf_verifier_env *env, u32 regno,
+                                int off, int size, enum bpf_access_type type)
+{
+       struct bpf_reg_state *regs = cur_regs(env);
+       struct bpf_map *map = regs[regno].map_ptr;
+       u32 cap = bpf_map_flags_to_cap(map);
+
+       if (type == BPF_WRITE && !(cap & BPF_MAP_CAN_WRITE)) {
+               verbose(env, "write into map forbidden, value_size=%d off=%d size=%d\n",
+                       map->value_size, off, size);
+               return -EACCES;
+       }
+
+       if (type == BPF_READ && !(cap & BPF_MAP_CAN_READ)) {
+               verbose(env, "read from map forbidden, value_size=%d off=%d size=%d\n",
+                       map->value_size, off, size);
+               return -EACCES;
+       }
+
+       return 0;
+}
+
 /* check read/write into map element returned by bpf_map_lookup_elem() */
 static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
                              int size, bool zero_size_allowed)
@@ -1455,7 +1490,7 @@ static int check_map_access(struct bpf_verifier_env *env, u32 regno,
         * need to try adding each of min_value and max_value to off
         * to make sure our theoretical access will be safe.
         */
-       if (env->log.level)
+       if (env->log.level & BPF_LOG_LEVEL)
                print_verifier_state(env, state);
 
        /* The minimum value is only important with signed
@@ -1898,8 +1933,9 @@ static int check_max_stack_depth(struct bpf_verifier_env *env)
                }
                frame++;
                if (frame >= MAX_CALL_FRAMES) {
-                       WARN_ONCE(1, "verifier bug. Call stack is too deep\n");
-                       return -EFAULT;
+                       verbose(env, "the call stack of %d frames is too deep!\n",
+                               frame);
+                       return -E2BIG;
                }
                goto process_func;
        }
@@ -2011,7 +2047,9 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
                        verbose(env, "R%d leaks addr into map\n", value_regno);
                        return -EACCES;
                }
-
+               err = check_map_access_type(env, regno, off, size, t);
+               if (err)
+                       return err;
                err = check_map_access(env, regno, off, size, false);
                if (!err && t == BPF_READ && value_regno >= 0)
                        mark_reg_unknown(env, regs, value_regno);
@@ -2157,6 +2195,29 @@ static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_ins
                                BPF_SIZE(insn->code), BPF_WRITE, -1, true);
 }
 
+static int __check_stack_boundary(struct bpf_verifier_env *env, u32 regno,
+                                 int off, int access_size,
+                                 bool zero_size_allowed)
+{
+       struct bpf_reg_state *reg = reg_state(env, regno);
+
+       if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
+           access_size < 0 || (access_size == 0 && !zero_size_allowed)) {
+               if (tnum_is_const(reg->var_off)) {
+                       verbose(env, "invalid stack type R%d off=%d access_size=%d\n",
+                               regno, off, access_size);
+               } else {
+                       char tn_buf[48];
+
+                       tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
+                       verbose(env, "invalid stack type R%d var_off=%s access_size=%d\n",
+                               regno, tn_buf, access_size);
+               }
+               return -EACCES;
+       }
+       return 0;
+}
+
 /* when register 'regno' is passed into function that will read 'access_size'
  * bytes from that pointer, make sure that it's within stack boundary
  * and all elements of stack are initialized.
@@ -2169,7 +2230,7 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
 {
        struct bpf_reg_state *reg = reg_state(env, regno);
        struct bpf_func_state *state = func(env, reg);
-       int off, i, slot, spi;
+       int err, min_off, max_off, i, slot, spi;
 
        if (reg->type != PTR_TO_STACK) {
                /* Allow zero-byte read from NULL, regardless of pointer type */
@@ -2183,21 +2244,57 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
                return -EACCES;
        }
 
-       /* Only allow fixed-offset stack reads */
-       if (!tnum_is_const(reg->var_off)) {
-               char tn_buf[48];
+       if (tnum_is_const(reg->var_off)) {
+               min_off = max_off = reg->var_off.value + reg->off;
+               err = __check_stack_boundary(env, regno, min_off, access_size,
+                                            zero_size_allowed);
+               if (err)
+                       return err;
+       } else {
+               /* Variable offset is prohibited for unprivileged mode for
+                * simplicity since it requires corresponding support in
+                * Spectre masking for stack ALU.
+                * See also retrieve_ptr_limit().
+                */
+               if (!env->allow_ptr_leaks) {
+                       char tn_buf[48];
 
-               tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
-               verbose(env, "invalid variable stack read R%d var_off=%s\n",
-                       regno, tn_buf);
-               return -EACCES;
-       }
-       off = reg->off + reg->var_off.value;
-       if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
-           access_size < 0 || (access_size == 0 && !zero_size_allowed)) {
-               verbose(env, "invalid stack type R%d off=%d access_size=%d\n",
-                       regno, off, access_size);
-               return -EACCES;
+                       tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
+                       verbose(env, "R%d indirect variable offset stack access prohibited for !root, var_off=%s\n",
+                               regno, tn_buf);
+                       return -EACCES;
+               }
+               /* Only an initialized buffer on the stack may be accessed at
+                * a variable offset. With an uninitialized buffer it is hard
+                * to guarantee that the whole memory is marked as initialized
+                * on helper return, since the exact bounds are unknown, which
+                * may cause uninitialized stack leaking.
+                */
+               if (meta && meta->raw_mode)
+                       meta = NULL;
+
+               if (reg->smax_value >= BPF_MAX_VAR_OFF ||
+                   reg->smax_value <= -BPF_MAX_VAR_OFF) {
+                       verbose(env, "R%d unbounded indirect variable offset stack access\n",
+                               regno);
+                       return -EACCES;
+               }
+               min_off = reg->smin_value + reg->off;
+               max_off = reg->smax_value + reg->off;
+               err = __check_stack_boundary(env, regno, min_off, access_size,
+                                            zero_size_allowed);
+               if (err) {
+                       verbose(env, "R%d min value is outside of stack bound\n",
+                               regno);
+                       return err;
+               }
+               err = __check_stack_boundary(env, regno, max_off, access_size,
+                                            zero_size_allowed);
+               if (err) {
+                       verbose(env, "R%d max value is outside of stack bound\n",
+                               regno);
+                       return err;
+               }
        }
 
        if (meta && meta->raw_mode) {
@@ -2206,10 +2303,10 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
                return 0;
        }
 
-       for (i = 0; i < access_size; i++) {
+       for (i = min_off; i < max_off + access_size; i++) {
                u8 *stype;
 
-               slot = -(off + i) - 1;
+               slot = -i - 1;
                spi = slot / BPF_REG_SIZE;
                if (state->allocated_stack <= slot)
                        goto err;
@@ -2222,8 +2319,16 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
                        goto mark;
                }
 err:
-               verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
-                       off, i, access_size);
+               if (tnum_is_const(reg->var_off)) {
+                       verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
+                               min_off, i - min_off, access_size);
+               } else {
+                       char tn_buf[48];
+
+                       tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
+                       verbose(env, "invalid indirect read from stack var_off %s+%d size %d\n",
+                               tn_buf, i - min_off, access_size);
+               }
                return -EACCES;
 mark:
                /* reading any byte out of 8-byte 'spill_slot' will cause
@@ -2232,7 +2337,7 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
                mark_reg_read(env, &state->stack[spi].spilled_ptr,
                              state->stack[spi].spilled_ptr.parent);
        }
-       return update_stack_depth(env, state, off);
+       return update_stack_depth(env, state, min_off);
 }
 
 static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
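
This is the centerpiece of the verifier changes: check_stack_boundary() used to reject any helper memory argument at a non-constant stack offset, and now accepts one from privileged loaders when both smin and smax of the offset keep the access inside an initialized buffer (unprivileged mode keeps the old behaviour because the Spectre masking in retrieve_ptr_limit() cannot handle variable offsets). A sketch of the BPF C pattern this admits, compiled with clang -target bpf and using the bpf_helpers.h shipped with the kernel selftests of this era; the attach point is arbitrary:

#include <linux/ptrace.h>
#include <linux/bpf.h>
#include "bpf_helpers.h"

SEC("kprobe/do_sys_open")
int var_off_stack(struct pt_regs *ctx)
{
        char buf[64] = {};                      /* fully initialized buffer */
        __u32 off = bpf_get_prandom_u32() & 31; /* bounded: smin=0, smax=31 */

        /* Indirect variable-offset stack access: 31 + 32 <= 64, so both
         * __check_stack_boundary() probes pass for a privileged loader;
         * before this change the load failed with "invalid variable
         * stack read". */
        bpf_probe_read(&buf[off], 32, (void *)ctx);
        return 0;
}

char _license[] SEC("license") = "GPL";
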
@@ -2247,6 +2352,10 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
                return check_packet_access(env, regno, reg->off, access_size,
                                           zero_size_allowed);
        case PTR_TO_MAP_VALUE:
+               if (check_map_access_type(env, regno, reg->off, access_size,
+                                         meta && meta->raw_mode ? BPF_WRITE :
+                                         BPF_READ))
+                       return -EACCES;
                return check_map_access(env, regno, reg->off, access_size,
                                        zero_size_allowed);
        default: /* scalar_value|ptr_to_stack or invalid ptr */
@@ -2905,7 +3014,7 @@ static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
        /* and go analyze first insn of the callee */
        *insn_idx = target_insn;
 
-       if (env->log.level) {
+       if (env->log.level & BPF_LOG_LEVEL) {
                verbose(env, "caller:\n");
                print_verifier_state(env, caller);
                verbose(env, "callee:\n");
@@ -2945,7 +3054,7 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
                return err;
 
        *insn_idx = callee->callsite + 1;
-       if (env->log.level) {
+       if (env->log.level & BPF_LOG_LEVEL) {
                verbose(env, "returning from callee:\n");
                print_verifier_state(env, callee);
                verbose(env, "to caller at %d:\n", *insn_idx);
@@ -2979,6 +3088,7 @@ record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
                int func_id, int insn_idx)
 {
        struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
+       struct bpf_map *map = meta->map_ptr;
 
        if (func_id != BPF_FUNC_tail_call &&
            func_id != BPF_FUNC_map_lookup_elem &&
@@ -2989,11 +3099,24 @@ record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
            func_id != BPF_FUNC_map_peek_elem)
                return 0;
 
-       if (meta->map_ptr == NULL) {
+       if (map == NULL) {
                verbose(env, "kernel subsystem misconfigured verifier\n");
                return -EINVAL;
        }
 
+       /* In case of read-only, some additional restrictions
+        * need to be applied in order to prevent altering the
+        * state of the map from the program side.
+        */
+       if ((map->map_flags & BPF_F_RDONLY_PROG) &&
+           (func_id == BPF_FUNC_map_delete_elem ||
+            func_id == BPF_FUNC_map_update_elem ||
+            func_id == BPF_FUNC_map_push_elem ||
+            func_id == BPF_FUNC_map_pop_elem)) {
+               verbose(env, "write into map forbidden\n");
+               return -EACCES;
+       }
+
        if (!BPF_MAP_PTR(aux->map_state))
                bpf_map_ptr_store(aux, meta->map_ptr,
                                  meta->map_ptr->unpriv_array);
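
record_func_map() now refuses the mutating map helpers at load time for BPF_F_RDONLY_PROG maps, complementing check_map_access_type() above, which rejects direct value stores. A userspace sketch of creating such a read-only-to-programs map via the raw syscall, so that no particular libbpf version is assumed:

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int create_rdonly_prog_array(void)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.map_type    = BPF_MAP_TYPE_ARRAY;
        attr.key_size    = 4;
        attr.value_size  = 64;
        attr.max_entries = 1;
        attr.map_flags   = BPF_F_RDONLY_PROG;   /* programs: lookups only */
        return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}

Programs calling, say, bpf_map_update_elem() against this map fail verification with the "write into map forbidden" message above; userspace can still update it until BPF_MAP_FREEZE (earlier in this file) is applied.
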
@@ -3284,6 +3407,9 @@ static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
 
        switch (ptr_reg->type) {
        case PTR_TO_STACK:
+               /* Indirect variable offset stack access is prohibited in
+                * unprivileged mode so it's not handled here.
+                */
                off = ptr_reg->off + ptr_reg->var_off.value;
                if (mask_to_left)
                        *ptr_limit = MAX_BPF_STACK + off;
@@ -4968,23 +5094,17 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
                        insn->dst_reg);
                return -EACCES;
        }
-       if (env->log.level)
+       if (env->log.level & BPF_LOG_LEVEL)
                print_verifier_state(env, this_branch->frame[this_branch->curframe]);
        return 0;
 }
 
-/* return the map pointer stored inside BPF_LD_IMM64 instruction */
-static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn)
-{
-       u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32;
-
-       return (struct bpf_map *) (unsigned long) imm64;
-}
-
 /* verify BPF_LD_IMM64 instruction */
 static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
 {
+       struct bpf_insn_aux_data *aux = cur_aux(env);
        struct bpf_reg_state *regs = cur_regs(env);
+       struct bpf_map *map;
        int err;
 
        if (BPF_SIZE(insn->code) != BPF_DW) {
@@ -5008,11 +5128,22 @@ static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
                return 0;
        }
 
-       /* replace_map_fd_with_map_ptr() should have caught bad ld_imm64 */
-       BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD);
+       map = env->used_maps[aux->map_index];
+       mark_reg_known_zero(env, regs, insn->dst_reg);
+       regs[insn->dst_reg].map_ptr = map;
+
+       if (insn->src_reg == BPF_PSEUDO_MAP_VALUE) {
+               regs[insn->dst_reg].type = PTR_TO_MAP_VALUE;
+               regs[insn->dst_reg].off = aux->map_off;
+               if (map_value_has_spin_lock(map))
+                       regs[insn->dst_reg].id = ++env->id_gen;
+       } else if (insn->src_reg == BPF_PSEUDO_MAP_FD) {
+               regs[insn->dst_reg].type = CONST_PTR_TO_MAP;
+       } else {
+               verbose(env, "bpf verifier is misconfigured\n");
+               return -EINVAL;
+       }
 
-       regs[insn->dst_reg].type = CONST_PTR_TO_MAP;
-       regs[insn->dst_reg].map_ptr = ld_imm64_to_map_ptr(insn);
        return 0;
 }
 
@@ -5266,13 +5397,13 @@ static int check_cfg(struct bpf_verifier_env *env)
        int ret = 0;
        int i, t;
 
-       insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
+       insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
        if (!insn_state)
                return -ENOMEM;
 
-       insn_stack = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
+       insn_stack = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
        if (!insn_stack) {
-               kfree(insn_state);
+               kvfree(insn_state);
                return -ENOMEM;
        }
 
@@ -5370,8 +5501,8 @@ static int check_cfg(struct bpf_verifier_env *env)
        ret = 0; /* cfg looks good */
 
 err_free:
-       kfree(insn_state);
-       kfree(insn_stack);
+       kvfree(insn_state);
+       kvfree(insn_stack);
        return ret;
 }
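
check_cfg()'s two per-insn arrays move from kcalloc() to kvcalloc()/kvfree(): with this series the privileged instruction limit becomes BPF_COMPLEXITY_LIMIT_INSNS (1 << 20, per the bpf_prog_load() hunk earlier and the define removed above), so each array can reach several megabytes and wants the vmalloc fallback. The arithmetic, assuming the 1M limit:

#include <stdio.h>

int main(void)
{
        unsigned long limit = 1UL << 20;        /* insns, per this series */
        unsigned long bytes = limit * sizeof(int);

        printf("%lu KiB per per-insn int array\n", bytes / 1024); /* 4096 */
        return 0;
}
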
 
@@ -6114,11 +6245,13 @@ static int propagate_liveness(struct bpf_verifier_env *env,
 static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
 {
        struct bpf_verifier_state_list *new_sl;
-       struct bpf_verifier_state_list *sl;
+       struct bpf_verifier_state_list *sl, **pprev;
        struct bpf_verifier_state *cur = env->cur_state, *new;
        int i, j, err, states_cnt = 0;
 
-       sl = env->explored_states[insn_idx];
+       pprev = &env->explored_states[insn_idx];
+       sl = *pprev;
+
        if (!sl)
                /* this 'insn_idx' instruction wasn't marked, so we will not
                 * be doing state search here
@@ -6129,6 +6262,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
 
        while (sl != STATE_LIST_MARK) {
                if (states_equal(env, &sl->state, cur)) {
+                       sl->hit_cnt++;
                        /* reached equivalent register/stack state,
                         * prune the search.
                         * Registers read by the continuation are read by us.
@@ -6144,10 +6278,40 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
                                return err;
                        return 1;
                }
-               sl = sl->next;
                states_cnt++;
+               sl->miss_cnt++;
+               /* heuristic to determine whether this state is beneficial
+                * to keep checking from a state-equivalence point of view.
+                * Higher numbers increase max_states_per_insn and verification time,
+                * but do not meaningfully decrease insn_processed.
+                */
+               if (sl->miss_cnt > sl->hit_cnt * 3 + 3) {
+                       /* the state is unlikely to be useful. Remove it to
+                        * speed up verification
+                        */
+                       *pprev = sl->next;
+                       if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE) {
+                               free_verifier_state(&sl->state, false);
+                               kfree(sl);
+                               env->peak_states--;
+                       } else {
+                               /* cannot free this state, since parentage chain may
+                                * walk it later. Add it to the free_list instead,
+                                * to be freed at the end of verification.
+                                */
+                               sl->next = env->free_list;
+                               env->free_list = sl;
+                       }
+                       sl = *pprev;
+                       continue;
+               }
+               pprev = &sl->next;
+               sl = *pprev;
        }
 
+       if (env->max_states_per_insn < states_cnt)
+               env->max_states_per_insn = states_cnt;
+
        if (!env->allow_ptr_leaks && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
                return 0;
 
@@ -6161,6 +6325,8 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
        new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
        if (!new_sl)
                return -ENOMEM;
+       env->total_states++;
+       env->peak_states++;
 
        /* add new state to the head of linked list */
        new = &new_sl->state;
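
The hit_cnt/miss_cnt bookkeeping above adds cache-style eviction for explored states: a state whose misses outrun its hits (miss_cnt > hit_cnt * 3 + 3) is unlikely to ever prune a future path, so it is unlinked mid-walk through the pprev cursor, or parked on free_list when the parentage chain may still reference it. The same two-pointer deletion pattern, standalone:

#include <stdio.h>
#include <stdlib.h>

struct sl { int useful; struct sl *next; };

static void prune(struct sl **pprev)
{
        struct sl *sl = *pprev;

        while (sl) {
                if (!sl->useful) {
                        *pprev = sl->next;      /* unlink in O(1) */
                        free(sl);
                } else {
                        pprev = &sl->next;      /* advance the anchor */
                }
                sl = *pprev;
        }
}

int main(void)
{
        struct sl *head = NULL, *n;
        int i;

        for (i = 0; i < 4; i++) {
                n = malloc(sizeof(*n));
                n->useful = i & 1;
                n->next = head;
                head = n;
        }
        prune(&head);
        for (n = head; n; n = n->next)
                printf("kept %d\n", n->useful); /* two nodes survive */
        return 0;
}
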
@@ -6245,8 +6411,7 @@ static int do_check(struct bpf_verifier_env *env)
        struct bpf_verifier_state *state;
        struct bpf_insn *insns = env->prog->insnsi;
        struct bpf_reg_state *regs;
-       int insn_cnt = env->prog->len, i;
-       int insn_processed = 0;
+       int insn_cnt = env->prog->len;
        bool do_print_state = false;
 
        env->prev_linfo = NULL;
@@ -6281,10 +6446,10 @@ static int do_check(struct bpf_verifier_env *env)
                insn = &insns[env->insn_idx];
                class = BPF_CLASS(insn->code);
 
-               if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
+               if (++env->insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
                        verbose(env,
                                "BPF program is too large. Processed %d insn\n",
-                               insn_processed);
+                               env->insn_processed);
                        return -E2BIG;
                }
 
@@ -6293,7 +6458,7 @@ static int do_check(struct bpf_verifier_env *env)
                        return err;
                if (err == 1) {
                        /* found equivalent state, can prune the search */
-                       if (env->log.level) {
+                       if (env->log.level & BPF_LOG_LEVEL) {
                                if (do_print_state)
                                        verbose(env, "\nfrom %d to %d%s: safe\n",
                                                env->prev_insn_idx, env->insn_idx,
@@ -6311,8 +6476,9 @@ static int do_check(struct bpf_verifier_env *env)
                if (need_resched())
                        cond_resched();
 
-               if (env->log.level > 1 || (env->log.level && do_print_state)) {
-                       if (env->log.level > 1)
+               if (env->log.level & BPF_LOG_LEVEL2 ||
+                   (env->log.level & BPF_LOG_LEVEL && do_print_state)) {
+                       if (env->log.level & BPF_LOG_LEVEL2)
                                verbose(env, "%d:", env->insn_idx);
                        else
                                verbose(env, "\nfrom %d to %d%s:",
@@ -6323,7 +6489,7 @@ static int do_check(struct bpf_verifier_env *env)
                        do_print_state = false;
                }
 
-               if (env->log.level) {
+               if (env->log.level & BPF_LOG_LEVEL) {
                        const struct bpf_insn_cbs cbs = {
                                .cb_print       = verbose,
                                .private_data   = env,
@@ -6588,16 +6754,6 @@ static int do_check(struct bpf_verifier_env *env)
                env->insn_idx++;
        }
 
-       verbose(env, "processed %d insns (limit %d), stack depth ",
-               insn_processed, BPF_COMPLEXITY_LIMIT_INSNS);
-       for (i = 0; i < env->subprog_cnt; i++) {
-               u32 depth = env->subprog_info[i].stack_depth;
-
-               verbose(env, "%d", depth);
-               if (i + 1 < env->subprog_cnt)
-                       verbose(env, "+");
-       }
-       verbose(env, "\n");
        env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
        return 0;
 }
@@ -6695,8 +6851,10 @@ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
                }
 
                if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
+                       struct bpf_insn_aux_data *aux;
                        struct bpf_map *map;
                        struct fd f;
+                       u64 addr;
 
                        if (i == insn_cnt - 1 || insn[1].code != 0 ||
                            insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
@@ -6705,13 +6863,19 @@ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
                                return -EINVAL;
                        }
 
-                       if (insn->src_reg == 0)
+                       if (insn[0].src_reg == 0)
                                /* valid generic load 64-bit imm */
                                goto next_insn;
 
-                       if (insn[0].src_reg != BPF_PSEUDO_MAP_FD ||
-                           insn[1].imm != 0) {
-                               verbose(env, "unrecognized bpf_ld_imm64 insn\n");
+                       /* In the final convert_pseudo_ld_imm64() step, this
+                        * is converted into a regular 64-bit imm load insn.
+                        */
+                       if ((insn[0].src_reg != BPF_PSEUDO_MAP_FD &&
+                            insn[0].src_reg != BPF_PSEUDO_MAP_VALUE) ||
+                           (insn[0].src_reg == BPF_PSEUDO_MAP_FD &&
+                            insn[1].imm != 0)) {
+                               verbose(env,
+                                       "unrecognized bpf_ld_imm64 insn\n");
                                return -EINVAL;
                        }
 
@@ -6729,16 +6893,47 @@ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
                                return err;
                        }
 
-                       /* store map pointer inside BPF_LD_IMM64 instruction */
-                       insn[0].imm = (u32) (unsigned long) map;
-                       insn[1].imm = ((u64) (unsigned long) map) >> 32;
+                       aux = &env->insn_aux_data[i];
+                       if (insn->src_reg == BPF_PSEUDO_MAP_FD) {
+                               addr = (unsigned long)map;
+                       } else {
+                               u32 off = insn[1].imm;
+
+                               if (off >= BPF_MAX_VAR_OFF) {
+                                       verbose(env, "direct value offset of %u is not allowed\n", off);
+                                       fdput(f);
+                                       return -EINVAL;
+                               }
+
+                               if (!map->ops->map_direct_value_addr) {
+                                       verbose(env, "no direct value access support for this map type\n");
+                                       fdput(f);
+                                       return -EINVAL;
+                               }
+
+                               err = map->ops->map_direct_value_addr(map, &addr, off);
+                               if (err) {
+                                       verbose(env, "invalid access to map value pointer, value_size=%u off=%u\n",
+                                               map->value_size, off);
+                                       fdput(f);
+                                       return err;
+                               }
+
+                               aux->map_off = off;
+                               addr += off;
+                       }
+
+                       insn[0].imm = (u32)addr;
+                       insn[1].imm = addr >> 32;
 
                        /* check whether we recorded this map already */
-                       for (j = 0; j < env->used_map_cnt; j++)
+                       for (j = 0; j < env->used_map_cnt; j++) {
                                if (env->used_maps[j] == map) {
+                                       aux->map_index = j;
                                        fdput(f);
                                        goto next_insn;
                                }
+                       }
 
                        if (env->used_map_cnt >= MAX_USED_MAPS) {
                                fdput(f);
@@ -6755,6 +6950,8 @@ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
                                fdput(f);
                                return PTR_ERR(map);
                        }
+
+                       aux->map_index = env->used_map_cnt;
                        env->used_maps[env->used_map_cnt++] = map;
 
                        if (bpf_map_is_cgroup_storage(map) &&
@@ -6860,8 +7057,13 @@ static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 of
        struct bpf_prog *new_prog;
 
        new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
-       if (!new_prog)
+       if (IS_ERR(new_prog)) {
+               if (PTR_ERR(new_prog) == -ERANGE)
+                       verbose(env,
+                               "insn %d cannot be patched due to 16-bit range\n",
+                               env->insn_aux_data[off].orig_idx);
                return NULL;
+       }
        if (adjust_insn_aux_data(env, new_prog->len, off, len))
                return NULL;
        adjust_subprog_starts(env, off, len);
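
The new -ERANGE case arises because BPF jump offsets are signed 16-bit values, so patching that grows the program can push a relative branch out of reach. From the UAPI instruction layout (include/uapi/linux/bpf.h):

        struct bpf_insn {
                __u8    code;           /* opcode */
                __u8    dst_reg:4;      /* dest register */
                __u8    src_reg:4;      /* source register */
                __s16   off;            /* signed offset: +/-32767 insns max */
                __s32   imm;            /* signed immediate constant */
        };
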
@@ -7803,6 +8005,14 @@ static void free_states(struct bpf_verifier_env *env)
        struct bpf_verifier_state_list *sl, *sln;
        int i;
 
+       sl = env->free_list;
+       while (sl) {
+               sln = sl->next;
+               free_verifier_state(&sl->state, false);
+               kfree(sl);
+               sl = sln;
+       }
+
        if (!env->explored_states)
                return;
 
@@ -7818,12 +8028,37 @@ static void free_states(struct bpf_verifier_env *env)
                        }
        }
 
-       kfree(env->explored_states);
+       kvfree(env->explored_states);
+}
+
+static void print_verification_stats(struct bpf_verifier_env *env)
+{
+       int i;
+
+       if (env->log.level & BPF_LOG_STATS) {
+               verbose(env, "verification time %lld usec\n",
+                       div_u64(env->verification_time, 1000));
+               verbose(env, "stack depth ");
+               for (i = 0; i < env->subprog_cnt; i++) {
+                       u32 depth = env->subprog_info[i].stack_depth;
+
+                       verbose(env, "%d", depth);
+                       if (i + 1 < env->subprog_cnt)
+                               verbose(env, "+");
+               }
+               verbose(env, "\n");
+       }
+       verbose(env, "processed %d insns (limit %d) max_states_per_insn %d "
+               "total_states %d peak_states %d mark_read %d\n",
+               env->insn_processed, BPF_COMPLEXITY_LIMIT_INSNS,
+               env->max_states_per_insn, env->total_states,
+               env->peak_states, env->longest_mark_read_walk);
 }
 
 int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
              union bpf_attr __user *uattr)
 {
+       u64 start_time = ktime_get_ns();
        struct bpf_verifier_env *env;
        struct bpf_verifier_log *log;
        int i, len, ret = -EINVAL;
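
For reference, the output of print_verification_stats() has this shape (the numbers below are invented for illustration; the final "processed" line is emitted even without the stats flag):

        verification time 1250 usec
        stack depth 64+32
        processed 2048 insns (limit 1000000) max_states_per_insn 4 total_states 160 peak_states 128 mark_read 12
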
@@ -7865,8 +8100,8 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
 
                ret = -EINVAL;
                /* log attributes have to be sane */
-               if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 ||
-                   !log->level || !log->ubuf)
+               if (log->len_total < 128 || log->len_total > UINT_MAX >> 2 ||
+                   !log->level || !log->ubuf || log->level & ~BPF_LOG_MASK)
                        goto err_unlock;
        }
 
@@ -7889,7 +8124,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
                        goto skip_full_check;
        }
 
-       env->explored_states = kcalloc(env->prog->len,
+       env->explored_states = kvcalloc(env->prog->len,
                                       sizeof(struct bpf_verifier_state_list *),
                                       GFP_USER);
        ret = -ENOMEM;
@@ -7947,6 +8182,9 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
        if (ret == 0)
                ret = fixup_call_args(env);
 
+       env->verification_time = ktime_get_ns() - start_time;
+       print_verification_stats(env);
+
        if (log->level && bpf_verifier_log_full(log))
                ret = -ENOSPC;
        if (log->level && !log->ubuf) {
index 025f419d16f68be3e70cfcbdabe113ed655af6e4..6754f3ecfd943c97af0b865197d01366abac7c73 100644 (file)
@@ -564,6 +564,20 @@ static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
                cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
 }
 
+static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
+{
+       if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
+               return true;
+       /*
+        * When CPU hotplug is disabled, taking the CPU down is not
+        * possible because takedown_cpu() and the architecture and
+        * subsystem specific mechanisms are not available. So the CPU
+        * which would be completely unplugged again needs to stay around
+        * in the current state.
+        */
+       return st->state <= CPUHP_BRINGUP_CPU;
+}
+
 static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
                              enum cpuhp_state target)
 {
@@ -574,8 +588,10 @@ static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
                st->state++;
                ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
                if (ret) {
-                       st->target = prev_state;
-                       undo_cpu_up(cpu, st);
+                       if (can_rollback_cpu(st)) {
+                               st->target = prev_state;
+                               undo_cpu_up(cpu, st);
+                       }
                        break;
                }
        }
index 45d51e8e26f62f27b8b91c3e53ddb2df20fd791b..a218e43cc38258ae6d7bed19f0d6e2ea852e2ac7 100644 (file)
@@ -706,7 +706,7 @@ static struct dma_debug_entry *dma_entry_alloc(void)
 #ifdef CONFIG_STACKTRACE
        entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
        entry->stacktrace.entries = entry->st_entries;
-       entry->stacktrace.skip = 2;
+       entry->stacktrace.skip = 1;
        save_stack_trace(&entry->stacktrace);
 #endif
 
index 72d06e302e9938dcaee6dbf57324d3ed31397035..534e01e7bc36854877d3361fcf2683ab09025d91 100644 (file)
@@ -2009,8 +2009,8 @@ event_sched_out(struct perf_event *event,
        event->pmu->del(event, 0);
        event->oncpu = -1;
 
-       if (event->pending_disable) {
-               event->pending_disable = 0;
+       if (READ_ONCE(event->pending_disable) >= 0) {
+               WRITE_ONCE(event->pending_disable, -1);
                state = PERF_EVENT_STATE_OFF;
        }
        perf_event_set_state(event, state);
@@ -2198,7 +2198,8 @@ EXPORT_SYMBOL_GPL(perf_event_disable);
 
 void perf_event_disable_inatomic(struct perf_event *event)
 {
-       event->pending_disable = 1;
+       WRITE_ONCE(event->pending_disable, smp_processor_id());
+       /* can fail, see perf_pending_event_disable() */
        irq_work_queue(&event->pending);
 }
 
@@ -5810,10 +5811,45 @@ void perf_event_wakeup(struct perf_event *event)
        }
 }
 
+static void perf_pending_event_disable(struct perf_event *event)
+{
+       int cpu = READ_ONCE(event->pending_disable);
+
+       if (cpu < 0)
+               return;
+
+       if (cpu == smp_processor_id()) {
+               WRITE_ONCE(event->pending_disable, -1);
+               perf_event_disable_local(event);
+               return;
+       }
+
+       /*
+        *  CPU-A                       CPU-B
+        *
+        *  perf_event_disable_inatomic()
+        *    @pending_disable = CPU-A;
+        *    irq_work_queue();
+        *
+        *  sched-out
+        *    @pending_disable = -1;
+        *
+        *                              sched-in
+        *                              perf_event_disable_inatomic()
+        *                                @pending_disable = CPU-B;
+        *                                irq_work_queue(); // FAILS
+        *
+        *  irq_work_run()
+        *    perf_pending_event()
+        *
+        * But the event runs on CPU-B and wants disabling there.
+        */
+       irq_work_queue_on(&event->pending, cpu);
+}
+
 static void perf_pending_event(struct irq_work *entry)
 {
-       struct perf_event *event = container_of(entry,
-                       struct perf_event, pending);
+       struct perf_event *event = container_of(entry, struct perf_event, pending);
        int rctx;
 
        rctx = perf_swevent_get_recursion_context();
@@ -5822,10 +5858,7 @@ static void perf_pending_event(struct irq_work *entry)
         * and we won't recurse 'further'.
         */
 
-       if (event->pending_disable) {
-               event->pending_disable = 0;
-               perf_event_disable_local(event);
-       }
+       perf_pending_event_disable(event);
 
        if (event->pending_wakeup) {
                event->pending_wakeup = 0;
@@ -10236,6 +10269,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 
 
        init_waitqueue_head(&event->waitq);
+       event->pending_disable = -1;
        init_irq_work(&event->pending, perf_pending_event);
 
        mutex_init(&event->mmap_mutex);
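
Condensed, the pending_disable protocol introduced across these hunks looks like this (a paraphrase of the patch, not new API):

        /* Requester (e.g. NMI context): record the event's CPU. */
        WRITE_ONCE(event->pending_disable, smp_processor_id());
        irq_work_queue(&event->pending);        /* may fail if already queued */

        /* irq_work handler: disable locally, else bounce to that CPU. */
        int cpu = READ_ONCE(event->pending_disable);

        if (cpu == smp_processor_id()) {
                WRITE_ONCE(event->pending_disable, -1);
                perf_event_disable_local(event);
        } else if (cpu >= 0) {
                irq_work_queue_on(&event->pending, cpu);
        }
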
index a4047321d7d8052b40302d4ed8c0aa8e649ba759..2545ac08cc77b0bb1df9df04be353bbe6a7a0575 100644 (file)
@@ -392,7 +392,7 @@ void *perf_aux_output_begin(struct perf_output_handle *handle,
                 * store that will be enabled on successful return
                 */
                if (!handle->size) { /* A, matches D */
-                       event->pending_disable = 1;
+                       event->pending_disable = smp_processor_id();
                        perf_output_wakeup(handle);
                        local_set(&rb->aux_nest, 0);
                        goto err_put;
@@ -480,7 +480,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
 
        if (wakeup) {
                if (handle->aux_flags & PERF_AUX_FLAG_TRUNCATED)
-                       handle->event->pending_disable = 1;
+                       handle->event->pending_disable = smp_processor_id();
                perf_output_wakeup(handle);
        }
 
index 3faef4a77f7103e004c6a26f9c4074750f23fc73..51128bea3846ca1c15cd622f0889602cd1688b78 100644 (file)
@@ -1449,6 +1449,10 @@ int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
 int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
 {
        data = data->parent_data;
+
+       if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE)
+               return 0;
+
        if (data->chip->irq_set_wake)
                return data->chip->irq_set_wake(data, on);
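
A parent irqchip opts into this short-circuit by setting the flag on itself, e.g. (hypothetical driver):

        static struct irq_chip demo_parent_chip = {
                .name  = "demo-parent",
                .flags = IRQCHIP_SKIP_SET_WAKE, /* wake configuration is a no-op */
        };
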
 
index 13539e12cd8034279c8f324242e4996301203d1a..9f8a709337cf802f2ddbf33f90a956d851f0c5b3 100644 (file)
@@ -558,6 +558,7 @@ int __init early_irq_init(void)
                alloc_masks(&desc[i], node);
                raw_spin_lock_init(&desc[i].lock);
                lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
+               mutex_init(&desc[i].request_mutex);
                desc_set_defaults(i, &desc[i], node, NULL, NULL);
        }
        return arch_early_irq_init();
index 34cdcbedda492b84cb610af67cb11113ea04065d..e16766ff184b5e57d636e987a70a1147d3b3ecb2 100644 (file)
@@ -4689,8 +4689,8 @@ static void free_zapped_rcu(struct rcu_head *ch)
                return;
 
        raw_local_irq_save(flags);
-       if (!graph_lock())
-               goto out_irq;
+       arch_spin_lock(&lockdep_lock);
+       current->lockdep_recursion = 1;
 
        /* closed head */
        pf = delayed_free.pf + (delayed_free.index ^ 1);
@@ -4702,8 +4702,8 @@ static void free_zapped_rcu(struct rcu_head *ch)
         */
        call_rcu_zapped(delayed_free.pf + delayed_free.index);
 
-       graph_unlock();
-out_irq:
+       current->lockdep_recursion = 0;
+       arch_spin_unlock(&lockdep_lock);
        raw_local_irq_restore(flags);
 }
 
@@ -4744,21 +4744,17 @@ static void lockdep_free_key_range_reg(void *start, unsigned long size)
 {
        struct pending_free *pf;
        unsigned long flags;
-       int locked;
 
        init_data_structures_once();
 
        raw_local_irq_save(flags);
-       locked = graph_lock();
-       if (!locked)
-               goto out_irq;
-
+       arch_spin_lock(&lockdep_lock);
+       current->lockdep_recursion = 1;
        pf = get_pending_free();
        __lockdep_free_key_range(pf, start, size);
        call_rcu_zapped(pf);
-
-       graph_unlock();
-out_irq:
+       current->lockdep_recursion = 0;
+       arch_spin_unlock(&lockdep_lock);
        raw_local_irq_restore(flags);
 
        /*
@@ -4911,9 +4907,8 @@ void lockdep_unregister_key(struct lock_class_key *key)
                return;
 
        raw_local_irq_save(flags);
-       if (!graph_lock())
-               goto out_irq;
-
+       arch_spin_lock(&lockdep_lock);
+       current->lockdep_recursion = 1;
        pf = get_pending_free();
        hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
                if (k == key) {
@@ -4925,8 +4920,8 @@ void lockdep_unregister_key(struct lock_class_key *key)
        WARN_ON_ONCE(!found);
        __lockdep_free_key_range(pf, key, 1);
        call_rcu_zapped(pf);
-       graph_unlock();
-out_irq:
+       current->lockdep_recursion = 0;
+       arch_spin_unlock(&lockdep_lock);
        raw_local_irq_restore(flags);
 
        /* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
index 771e93f9c43f826270c1927665fa5d6aaa8654e7..6f357f4fc85900db94f5a9dc45b098849e0e158d 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/hw_breakpoint.h>
 #include <linux/cn_proc.h>
 #include <linux/compat.h>
+#include <linux/sched/signal.h>
 
 /*
  * Access another process' address space via ptrace.
@@ -924,18 +925,26 @@ int ptrace_request(struct task_struct *child, long request,
                        ret = ptrace_setsiginfo(child, &siginfo);
                break;
 
-       case PTRACE_GETSIGMASK:
+       case PTRACE_GETSIGMASK: {
+               sigset_t *mask;
+
                if (addr != sizeof(sigset_t)) {
                        ret = -EINVAL;
                        break;
                }
 
-               if (copy_to_user(datavp, &child->blocked, sizeof(sigset_t)))
+               if (test_tsk_restore_sigmask(child))
+                       mask = &child->saved_sigmask;
+               else
+                       mask = &child->blocked;
+
+               if (copy_to_user(datavp, mask, sizeof(sigset_t)))
                        ret = -EFAULT;
                else
                        ret = 0;
 
                break;
+       }
 
        case PTRACE_SETSIGMASK: {
                sigset_t new_set;
@@ -961,6 +970,8 @@ int ptrace_request(struct task_struct *child, long request,
                child->blocked = new_set;
                spin_unlock_irq(&child->sighand->siglock);
 
+               clear_tsk_restore_sigmask(child);
+
                ret = 0;
                break;
        }
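
The userspace calling convention is unchanged; addr carries the sigset size and data points at the destination buffer (sketch):

        #include <sys/ptrace.h>
        #include <signal.h>

        sigset_t mask;
        long err = ptrace(PTRACE_GETSIGMASK, pid,
                          (void *)sizeof(sigset_t), &mask);
        /* On success, mask now reflects saved_sigmask when a restore is
         * pending, instead of the transient ->blocked value. */
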
index fdab7eb6f3517af0ca9581a730b0771c28092fa0..40bd1e27b1b79f88e3ba23b413e5d0d7f1b28094 100644 (file)
@@ -7784,10 +7784,10 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
        if (cfs_rq->last_h_load_update == now)
                return;
 
-       cfs_rq->h_load_next = NULL;
+       WRITE_ONCE(cfs_rq->h_load_next, NULL);
        for_each_sched_entity(se) {
                cfs_rq = cfs_rq_of(se);
-               cfs_rq->h_load_next = se;
+               WRITE_ONCE(cfs_rq->h_load_next, se);
                if (cfs_rq->last_h_load_update == now)
                        break;
        }
@@ -7797,7 +7797,7 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
                cfs_rq->last_h_load_update = now;
        }
 
-       while ((se = cfs_rq->h_load_next) != NULL) {
+       while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) {
                load = cfs_rq->h_load;
                load = div64_ul(load * se->avg.load_avg,
                        cfs_rq_load_avg(cfs_rq) + 1);
index 54a0347ca8128f09cdbbcc83e2e8f8eea633a7ab..df27e499956a1a5a816fd31081c15292cdeb6777 100644 (file)
@@ -149,7 +149,7 @@ static void populate_seccomp_data(struct seccomp_data *sd)
 
        sd->nr = syscall_get_nr(task, regs);
        sd->arch = syscall_get_arch();
-       syscall_get_arguments(task, regs, 0, 6, args);
+       syscall_get_arguments(task, regs, args);
        sd->args[0] = args[0];
        sd->args[1] = args[1];
        sd->args[2] = args[2];
index b7953934aa994e7993254aa6b04438815ed37f1f..f98448cf2defb5b2f212d005dcfa2a899252ae25 100644 (file)
@@ -3605,16 +3605,11 @@ SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
                if (unlikely(sig != kinfo.si_signo))
                        goto err;
 
+               /* Only allow sending arbitrary signals to yourself. */
+               ret = -EPERM;
                if ((task_pid(current) != pid) &&
-                   (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL)) {
-                       /* Only allow sending arbitrary signals to yourself. */
-                       ret = -EPERM;
-                       if (kinfo.si_code != SI_USER)
-                               goto err;
-
-                       /* Turn this into a regular kill signal. */
-                       prepare_kill_siginfo(sig, &kinfo);
-               }
+                   (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
+                       goto err;
        } else {
                prepare_kill_siginfo(sig, &kinfo);
        }
index e5da394d1ca3675ef6bc050660c1d5a0892915ca..c9ec050bcf46126286dba7d122b3a7240de6de65 100644 (file)
@@ -128,6 +128,7 @@ static int zero;
 static int __maybe_unused one = 1;
 static int __maybe_unused two = 2;
 static int __maybe_unused four = 4;
+static unsigned long zero_ul;
 static unsigned long one_ul = 1;
 static unsigned long long_max = LONG_MAX;
 static int one_hundred = 100;
@@ -1750,7 +1751,7 @@ static struct ctl_table fs_table[] = {
                .maxlen         = sizeof(files_stat.max_files),
                .mode           = 0644,
                .proc_handler   = proc_doulongvec_minmax,
-               .extra1         = &zero,
+               .extra1         = &zero_ul,
                .extra2         = &long_max,
        },
        {
index 2c97e8c2d29fb3351332e447323750247d950f58..0519a8805aab3f290e3fb437ff0903a0e4722d90 100644 (file)
@@ -594,7 +594,7 @@ static ktime_t alarm_timer_remaining(struct k_itimer *timr, ktime_t now)
 {
        struct alarm *alarm = &timr->it.alarm.alarmtimer;
 
-       return ktime_sub(now, alarm->node.expires);
+       return ktime_sub(alarm->node.expires, now);
 }
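
The operands were simply swapped: the remaining time is expires - now. With now = 5s and expires = 8s:

        ktime_sub(now, alarm->node.expires)     /* old: 5s - 8s = -3s (wrong) */
        ktime_sub(alarm->node.expires, now)     /* new: 8s - 5s =  3s         */
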
 
 /**
index c3f756f8534bba606b1fcee479bfcb0e05829502..9e3f79d4f5a850475763eeffb0d0bf8d8c8527c7 100644 (file)
@@ -783,6 +783,16 @@ u64 jiffies64_to_nsecs(u64 j)
 }
 EXPORT_SYMBOL(jiffies64_to_nsecs);
 
+u64 jiffies64_to_msecs(const u64 j)
+{
+#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
+       return (MSEC_PER_SEC / HZ) * j;
+#else
+       return div_u64(j * HZ_TO_MSEC_NUM, HZ_TO_MSEC_DEN);
+#endif
+}
+EXPORT_SYMBOL(jiffies64_to_msecs);
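
When HZ divides MSEC_PER_SEC the conversion reduces to a constant multiply on the fast path; for example, with HZ = 250:

        jiffies64_to_msecs(5)   /* (1000 / 250) * 5 = 20 ms */
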
+
 /**
  * nsecs_to_jiffies64 - Convert nsecs in u64 to jiffies64
  *
index 21153e64bf1c366033213e90272438ba171b2822..6c24755655c752a3bf9f4bb914ddb251d9ab0d2e 100644 (file)
@@ -7041,12 +7041,16 @@ static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
        buf->private = 0;
 }
 
-static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
+static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
                                struct pipe_buffer *buf)
 {
        struct buffer_ref *ref = (struct buffer_ref *)buf->private;
 
+       if (ref->ref > INT_MAX/2)
+               return false;
+
        ref->ref++;
+       return true;
 }
 
 /* Pipe buffer operations for a buffer. */
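
Callers of the ->get() hook must now check for failure, since the reference count is capped; a caller-side sketch (the error value is illustrative, not from the patch):

        if (!pipe_buf_get(pipe, buf))   /* fails near refcount overflow */
                return -EBUSY;
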
index f93a56d2db275be64df083344b68ec65f3c32473..fa8fbff736d684734e89f05fb149c4e83436d8f4 100644 (file)
@@ -314,6 +314,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        unsigned long irq_flags;
+       unsigned long args[6];
        int pc;
        int syscall_nr;
        int size;
@@ -347,7 +348,8 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
 
        entry = ring_buffer_event_data(event);
        entry->nr = syscall_nr;
-       syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);
+       syscall_get_arguments(current, regs, args);
+       memcpy(entry->args, args, sizeof(unsigned long) * sys_data->nb_args);
 
        event_trigger_unlock_commit(trace_file, buffer, event, entry,
                                    irq_flags, pc);
@@ -583,6 +585,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
        struct syscall_metadata *sys_data;
        struct syscall_trace_enter *rec;
        struct hlist_head *head;
+       unsigned long args[6];
        bool valid_prog_array;
        int syscall_nr;
        int rctx;
@@ -613,8 +616,8 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
                return;
 
        rec->nr = syscall_nr;
-       syscall_get_arguments(current, regs, 0, sys_data->nb_args,
-                              (unsigned long *)&rec->args);
+       syscall_get_arguments(current, regs, args);
+       memcpy(&rec->args, args, sizeof(unsigned long) * sys_data->nb_args);
 
        if ((valid_prog_array &&
             !perf_call_bpf_enter(sys_data->enter_event, regs, sys_data, rec)) ||
index 403c9bd9041395a1d7919977acc7ed68d8c746ca..6a578723311328a6394941aeb9ff4eef6130b4ac 100644 (file)
@@ -554,13 +554,15 @@ static void softlockup_start_all(void)
 
 int lockup_detector_online_cpu(unsigned int cpu)
 {
-       watchdog_enable(cpu);
+       if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
+               watchdog_enable(cpu);
        return 0;
 }
 
 int lockup_detector_offline_cpu(unsigned int cpu)
 {
-       watchdog_disable(cpu);
+       if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
+               watchdog_disable(cpu);
        return 0;
 }
 
index 0d9e81779e373745c3e28497df424b415a50c3ac..188fc17c22022d8a67fe51fb0f673b6156e69d28 100644 (file)
@@ -219,6 +219,14 @@ config DEBUG_INFO_DWARF4
          But it significantly improves the success of resolving
          variables in gdb on optimized code.
 
+config DEBUG_INFO_BTF
+       bool "Generate BTF typeinfo"
+       depends on DEBUG_INFO
+       help
+         Generate deduplicated BTF type information from DWARF debug info.
+         Turning this on requires the pahole tool, which converts the
+         DWARF type info into equivalent deduplicated BTF type info.
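
Enabling it in a kernel config builds on DWARF debug info (and needs pahole installed):

        CONFIG_DEBUG_INFO=y
        CONFIG_DEBUG_INFO_BTF=y
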
+
 config GDB_SCRIPTS
        bool "Provide GDB scripts for kernel debugging"
        depends on DEBUG_INFO
index ea36dc355da131b4a45b71d8be6f1bc69a53e637..b396d328a7643b7c1984b8e87df9da88c45e7470 100644 (file)
@@ -1528,6 +1528,7 @@ EXPORT_SYMBOL(csum_and_copy_to_iter);
 size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
                struct iov_iter *i)
 {
+#ifdef CONFIG_CRYPTO
        struct ahash_request *hash = hashp;
        struct scatterlist sg;
        size_t copied;
@@ -1537,6 +1538,9 @@ size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
        ahash_request_set_crypt(hash, &sg, NULL, copied);
        crypto_ahash_update(hash);
        return copied;
+#else
+       return 0;
+#endif
 }
 EXPORT_SYMBOL(hash_and_copy_to_iter);
 
index 4525fb09484427297853ca5819dcfaffee9265f1..a8ede77afe0db70fa7992319c470c2a65c07bf58 100644 (file)
@@ -291,13 +291,14 @@ int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len,
 {
        const unsigned char *ip = in;
        unsigned char *op = out;
+       unsigned char *data_start;
        size_t l = in_len;
        size_t t = 0;
        signed char state_offset = -2;
        unsigned int m4_max_offset;
 
-       // LZO v0 will never write 17 as first byte,
-       // so this is used to version the bitstream
+       // LZO v0 will never write 17 as first byte (except for zero-length
+       // input), so this is used to version the bitstream
        if (bitstream_version > 0) {
                *op++ = 17;
                *op++ = bitstream_version;
@@ -306,6 +307,8 @@ int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len,
                m4_max_offset = M4_MAX_OFFSET_V0;
        }
 
+       data_start = op;
+
        while (l > 20) {
                size_t ll = l <= (m4_max_offset + 1) ? l : (m4_max_offset + 1);
                uintptr_t ll_end = (uintptr_t) ip + ll;
@@ -324,7 +327,7 @@ int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len,
        if (t > 0) {
                const unsigned char *ii = in + in_len - t;
 
-               if (op == out && t <= 238) {
+               if (op == data_start && t <= 238) {
                        *op++ = (17 + t);
                } else if (t <= 3) {
                        op[state_offset] |= t;
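
A versioned stream therefore starts with a two-byte header, and data_start marks where the compressed data begins:

        out[0] = 17;                    /* never a valid LZO v0 first byte */
        out[1] = bitstream_version;     /* e.g. LZO-RLE */
        /* literals/matches follow; data_start == out + 2 */
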
index 6d2600ea3b5547efa35ae1572e83fc56f0a325ad..9e07e9ef1aad7e7f8b0044f6bf954ff2f4ae9099 100644 (file)
@@ -54,11 +54,9 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
        if (unlikely(in_len < 3))
                goto input_overrun;
 
-       if (likely(*ip == 17)) {
+       if (likely(in_len >= 5) && likely(*ip == 17)) {
                bitstream_version = ip[1];
                ip += 2;
-               if (unlikely(in_len < 5))
-                       goto input_overrun;
        } else {
                bitstream_version = 0;
        }
index 811d51b7cb86a2eb7e21e6c81525da0648eac509..6529fe1b45c13421d69665326ba620e7c39d01e0 100644 (file)
 
 #define HASH_DEFAULT_SIZE      64UL
 #define HASH_MIN_SIZE          4U
-#define BUCKET_LOCKS_PER_CPU   32UL
 
 union nested_table {
        union nested_table __rcu *table;
-       struct rhash_head __rcu *bucket;
+       struct rhash_lock_head __rcu *bucket;
 };
 
 static u32 head_hashfn(struct rhashtable *ht,
@@ -56,9 +55,11 @@ EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
 
 int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
 {
-       spinlock_t *lock = rht_bucket_lock(tbl, hash);
-
-       return (debug_locks) ? lockdep_is_held(lock) : 1;
+       if (!debug_locks)
+               return 1;
+       if (unlikely(tbl->nest))
+               return 1;
+       return bit_spin_is_locked(0, (unsigned long *)&tbl->buckets[hash]);
 }
 EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
 #else
@@ -104,7 +105,6 @@ static void bucket_table_free(const struct bucket_table *tbl)
        if (tbl->nest)
                nested_bucket_table_free(tbl);
 
-       free_bucket_spinlocks(tbl->locks);
        kvfree(tbl);
 }
 
@@ -131,9 +131,11 @@ static union nested_table *nested_table_alloc(struct rhashtable *ht,
                        INIT_RHT_NULLS_HEAD(ntbl[i].bucket);
        }
 
-       rcu_assign_pointer(*prev, ntbl);
-
-       return ntbl;
+       if (cmpxchg(prev, NULL, ntbl) == NULL)
+               return ntbl;
+       /* Raced with another thread. */
+       kfree(ntbl);
+       return rcu_dereference(*prev);
 }
 
 static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
@@ -169,11 +171,11 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
                                               gfp_t gfp)
 {
        struct bucket_table *tbl = NULL;
-       size_t size, max_locks;
+       size_t size;
        int i;
+       static struct lock_class_key __key;
 
-       size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
-       tbl = kvzalloc(size, gfp);
+       tbl = kvzalloc(struct_size(tbl, buckets, nbuckets), gfp);
 
        size = nbuckets;
 
@@ -185,17 +187,9 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
        if (tbl == NULL)
                return NULL;
 
-       tbl->size = size;
-
-       max_locks = size >> 1;
-       if (tbl->nest)
-               max_locks = min_t(size_t, max_locks, 1U << tbl->nest);
+       lockdep_init_map(&tbl->dep_map, "rhashtable_bucket", &__key, 0);
 
-       if (alloc_bucket_spinlocks(&tbl->locks, &tbl->locks_mask, max_locks,
-                                  ht->p.locks_mul, gfp) < 0) {
-               bucket_table_free(tbl);
-               return NULL;
-       }
+       tbl->size = size;
 
        rcu_head_init(&tbl->rcu);
        INIT_LIST_HEAD(&tbl->walkers);
@@ -221,14 +215,15 @@ static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
        return new_tbl;
 }
 
-static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
+static int rhashtable_rehash_one(struct rhashtable *ht,
+                                struct rhash_lock_head __rcu **bkt,
+                                unsigned int old_hash)
 {
        struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
        struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl);
-       struct rhash_head __rcu **pprev = rht_bucket_var(old_tbl, old_hash);
        int err = -EAGAIN;
        struct rhash_head *head, *next, *entry;
-       spinlock_t *new_bucket_lock;
+       struct rhash_head __rcu **pprev = NULL;
        unsigned int new_hash;
 
        if (new_tbl->nest)
@@ -236,7 +231,8 @@ static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
 
        err = -ENOENT;
 
-       rht_for_each(entry, old_tbl, old_hash) {
+       rht_for_each_from(entry, rht_ptr(bkt, old_tbl, old_hash),
+                         old_tbl, old_hash) {
                err = 0;
                next = rht_dereference_bucket(entry->next, old_tbl, old_hash);
 
@@ -251,18 +247,19 @@ static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
 
        new_hash = head_hashfn(ht, new_tbl, entry);
 
-       new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);
+       rht_lock_nested(new_tbl, &new_tbl->buckets[new_hash], SINGLE_DEPTH_NESTING);
 
-       spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
-       head = rht_dereference_bucket(new_tbl->buckets[new_hash],
-                                     new_tbl, new_hash);
+       head = rht_ptr(new_tbl->buckets + new_hash, new_tbl, new_hash);
 
        RCU_INIT_POINTER(entry->next, head);
 
-       rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
-       spin_unlock(new_bucket_lock);
+       rht_assign_unlock(new_tbl, &new_tbl->buckets[new_hash], entry);
 
-       rcu_assign_pointer(*pprev, next);
+       if (pprev)
+               rcu_assign_pointer(*pprev, next);
+       else
+                               /* Need to preserve the bit lock. */
+               rht_assign_locked(bkt, next);
 
 out:
        return err;
@@ -272,19 +269,19 @@ static int rhashtable_rehash_chain(struct rhashtable *ht,
                                    unsigned int old_hash)
 {
        struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
-       spinlock_t *old_bucket_lock;
+       struct rhash_lock_head __rcu **bkt = rht_bucket_var(old_tbl, old_hash);
        int err;
 
-       old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);
+       if (!bkt)
+               return 0;
+       rht_lock(old_tbl, bkt);
 
-       spin_lock_bh(old_bucket_lock);
-       while (!(err = rhashtable_rehash_one(ht, old_hash)))
+       while (!(err = rhashtable_rehash_one(ht, bkt, old_hash)))
                ;
 
        if (err == -ENOENT)
                err = 0;
-
-       spin_unlock_bh(old_bucket_lock);
+       rht_unlock(old_tbl, bkt);
 
        return err;
 }
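
With the per-bucket spinlock array gone, rht_lock()/rht_unlock() take a bit spinlock stored in bit 0 of the bucket head pointer itself. A minimal sketch of the idea (the kernel helpers additionally handle BH disabling and lockdep):

        #include <linux/bit_spinlock.h>

        static void bucket_lock(unsigned long *bkt)
        {
                bit_spin_lock(0, bkt);          /* spin on bit 0 of the word */
        }

        static void bucket_unlock(unsigned long *bkt)
        {
                bit_spin_unlock(0, bkt);        /* clear bit, release waiters */
        }
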
@@ -481,6 +478,7 @@ static int rhashtable_insert_rehash(struct rhashtable *ht,
 }
 
 static void *rhashtable_lookup_one(struct rhashtable *ht,
+                                  struct rhash_lock_head __rcu **bkt,
                                   struct bucket_table *tbl, unsigned int hash,
                                   const void *key, struct rhash_head *obj)
 {
@@ -488,13 +486,12 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
                .ht = ht,
                .key = key,
        };
-       struct rhash_head __rcu **pprev;
+       struct rhash_head __rcu **pprev = NULL;
        struct rhash_head *head;
        int elasticity;
 
        elasticity = RHT_ELASTICITY;
-       pprev = rht_bucket_var(tbl, hash);
-       rht_for_each_from(head, *pprev, tbl, hash) {
+       rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) {
                struct rhlist_head *list;
                struct rhlist_head *plist;
 
@@ -516,7 +513,11 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
                RCU_INIT_POINTER(list->next, plist);
                head = rht_dereference_bucket(head->next, tbl, hash);
                RCU_INIT_POINTER(list->rhead.next, head);
-               rcu_assign_pointer(*pprev, obj);
+               if (pprev)
+                       rcu_assign_pointer(*pprev, obj);
+               else
+                       /* Need to preserve the bit lock */
+                       rht_assign_locked(bkt, obj);
 
                return NULL;
        }
@@ -528,12 +529,12 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
 }
 
 static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
+                                                 struct rhash_lock_head __rcu **bkt,
                                                  struct bucket_table *tbl,
                                                  unsigned int hash,
                                                  struct rhash_head *obj,
                                                  void *data)
 {
-       struct rhash_head __rcu **pprev;
        struct bucket_table *new_tbl;
        struct rhash_head *head;
 
@@ -556,11 +557,7 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
        if (unlikely(rht_grow_above_100(ht, tbl)))
                return ERR_PTR(-EAGAIN);
 
-       pprev = rht_bucket_insert(ht, tbl, hash);
-       if (!pprev)
-               return ERR_PTR(-ENOMEM);
-
-       head = rht_dereference_bucket(*pprev, tbl, hash);
+       head = rht_ptr(bkt, tbl, hash);
 
        RCU_INIT_POINTER(obj->next, head);
        if (ht->rhlist) {
@@ -570,7 +567,10 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
                RCU_INIT_POINTER(list->next, NULL);
        }
 
-       rcu_assign_pointer(*pprev, obj);
+       /* bkt is always the head of the list, so it holds
+        * the lock, which we need to preserve
+        */
+       rht_assign_locked(bkt, obj);
 
        atomic_inc(&ht->nelems);
        if (rht_grow_above_75(ht, tbl))
@@ -584,6 +584,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
 {
        struct bucket_table *new_tbl;
        struct bucket_table *tbl;
+       struct rhash_lock_head __rcu **bkt;
        unsigned int hash;
        void *data;
 
@@ -592,14 +593,25 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
        do {
                tbl = new_tbl;
                hash = rht_head_hashfn(ht, tbl, obj, ht->p);
-               spin_lock_bh(rht_bucket_lock(tbl, hash));
-
-               data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
-               new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
-               if (PTR_ERR(new_tbl) != -EEXIST)
-                       data = ERR_CAST(new_tbl);
-
-               spin_unlock_bh(rht_bucket_lock(tbl, hash));
+               if (rcu_access_pointer(tbl->future_tbl))
+                       /* Failure is OK */
+                       bkt = rht_bucket_var(tbl, hash);
+               else
+                       bkt = rht_bucket_insert(ht, tbl, hash);
+               if (bkt == NULL) {
+                       new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+                       data = ERR_PTR(-EAGAIN);
+               } else {
+                       rht_lock(tbl, bkt);
+                       data = rhashtable_lookup_one(ht, bkt, tbl,
+                                                    hash, key, obj);
+                       new_tbl = rhashtable_insert_one(ht, bkt, tbl,
+                                                       hash, obj, data);
+                       if (PTR_ERR(new_tbl) != -EEXIST)
+                               data = ERR_CAST(new_tbl);
+
+                       rht_unlock(tbl, bkt);
+               }
        } while (!IS_ERR_OR_NULL(new_tbl));
 
        if (PTR_ERR(data) == -EAGAIN)
@@ -1026,11 +1038,6 @@ int rhashtable_init(struct rhashtable *ht,
 
        size = rounded_hashtable_size(&ht->p);
 
-       if (params->locks_mul)
-               ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
-       else
-               ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;
-
        ht->key_len = ht->p.key_len;
        if (!params->hashfn) {
                ht->p.hashfn = jhash;
@@ -1132,7 +1139,7 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
                        struct rhash_head *pos, *next;
 
                        cond_resched();
-                       for (pos = rht_dereference(*rht_bucket(tbl, i), ht),
+                       for (pos = rht_ptr_exclusive(rht_bucket(tbl, i)),
                             next = !rht_is_a_nulls(pos) ?
                                        rht_dereference(pos->next, ht) : NULL;
                             !rht_is_a_nulls(pos);
@@ -1159,11 +1166,10 @@ void rhashtable_destroy(struct rhashtable *ht)
 }
 EXPORT_SYMBOL_GPL(rhashtable_destroy);
 
-struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
-                                           unsigned int hash)
+struct rhash_lock_head __rcu **__rht_bucket_nested(const struct bucket_table *tbl,
+                                                  unsigned int hash)
 {
        const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
-       static struct rhash_head __rcu *rhnull;
        unsigned int index = hash & ((1 << tbl->nest) - 1);
        unsigned int size = tbl->size >> tbl->nest;
        unsigned int subhash = hash;
@@ -1181,20 +1187,28 @@ struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
                subhash >>= shift;
        }
 
-       if (!ntbl) {
-               if (!rhnull)
-                       INIT_RHT_NULLS_HEAD(rhnull);
-               return &rhnull;
-       }
+       if (!ntbl)
+               return NULL;
 
        return &ntbl[subhash].bucket;
 
 }
+EXPORT_SYMBOL_GPL(__rht_bucket_nested);
+
+struct rhash_lock_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
+                                                unsigned int hash)
+{
+       static struct rhash_lock_head __rcu *rhnull;
+
+       if (!rhnull)
+               INIT_RHT_NULLS_HEAD(rhnull);
+       return __rht_bucket_nested(tbl, hash) ?: &rhnull;
+}
 EXPORT_SYMBOL_GPL(rht_bucket_nested);
 
-struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
-                                                  struct bucket_table *tbl,
-                                                  unsigned int hash)
+struct rhash_lock_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
+                                                       struct bucket_table *tbl,
+                                                       unsigned int hash)
 {
        const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
        unsigned int index = hash & ((1 << tbl->nest) - 1);
index 5b382c1244ede33c14016142ac2d7fec4d0608da..155fe38756ecfda251f26fa8616a325dddd8d455 100644 (file)
@@ -591,6 +591,17 @@ EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
 void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
                         unsigned int cpu)
 {
+       /*
+        * Once the clear bit is set, the bit may be handed out again.
+        *
+        * Ordering READ/WRITE on the associated instance (such as a blk_mq
+        * request) against this bit avoids racing with re-allocation; its
+        * pair is the memory barrier implied in __sbitmap_get_word.
+        *
+        * One invariant is that the clear bit has to be zero when the bit
+        * is in use.
+        */
+       smp_mb__before_atomic();
        sbitmap_deferred_clear_bit(&sbq->sb, nr);
 
        /*
index 38e4ca08e757cbb9bfa7b551c86062fa252a2d94..3ab861c1a857ad1d75bfdd59ff61dc51d1c7350d 100644 (file)
@@ -866,6 +866,26 @@ __visible int memcmp(const void *cs, const void *ct, size_t count)
 EXPORT_SYMBOL(memcmp);
 #endif
 
+#ifndef __HAVE_ARCH_BCMP
+/**
+ * bcmp - returns 0 if and only if the buffers have identical contents.
+ * @a: pointer to first buffer.
+ * @b: pointer to second buffer.
+ * @len: size of buffers.
+ *
+ * The sign or magnitude of a non-zero return value has no particular
+ * meaning, and architectures may implement their own more efficient bcmp(). So
+ * while this particular implementation is a simple (tail) call to memcmp, do
+ * not rely on anything but whether the return value is zero or non-zero.
+ */
+#undef bcmp
+int bcmp(const void *a, const void *b, size_t len)
+{
+       return memcmp(a, b, len);
+}
+EXPORT_SYMBOL(bcmp);
+#endif
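
Callers must treat the result strictly as an equality test:

        bool same = bcmp(a, b, len) == 0;       /* correct */
        /* if (bcmp(a, b, len) < 0) ...          wrong: sign is meaningless */
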
+
 #ifndef __HAVE_ARCH_MEMSCAN
 /**
  * memscan - Find a character in an area of memory.
index 1a7077f20eae4079a25aae3c4d6703edaffd8d65..fb328e7ccb0893136e949f3581cc8b647f414de2 100644 (file)
@@ -5,16 +5,14 @@
 #include <linux/export.h>
 #include <asm/syscall.h>
 
-static int collect_syscall(struct task_struct *target, long *callno,
-                          unsigned long args[6], unsigned int maxargs,
-                          unsigned long *sp, unsigned long *pc)
+static int collect_syscall(struct task_struct *target, struct syscall_info *info)
 {
        struct pt_regs *regs;
 
        if (!try_get_task_stack(target)) {
                /* Task has no stack, so the task isn't in a syscall. */
-               *sp = *pc = 0;
-               *callno = -1;
+               memset(info, 0, sizeof(*info));
+               info->data.nr = -1;
                return 0;
        }
 
@@ -24,12 +22,13 @@ static int collect_syscall(struct task_struct *target, long *callno,
                return -EAGAIN;
        }
 
-       *sp = user_stack_pointer(regs);
-       *pc = instruction_pointer(regs);
+       info->sp = user_stack_pointer(regs);
+       info->data.instruction_pointer = instruction_pointer(regs);
 
-       *callno = syscall_get_nr(target, regs);
-       if (*callno != -1L && maxargs > 0)
-               syscall_get_arguments(target, regs, 0, maxargs, args);
+       info->data.nr = syscall_get_nr(target, regs);
+       if (info->data.nr != -1L)
+               syscall_get_arguments(target, regs,
+                                     (unsigned long *)&info->data.args[0]);
 
        put_task_stack(target);
        return 0;
@@ -38,41 +37,35 @@ static int collect_syscall(struct task_struct *target, long *callno,
 /**
  * task_current_syscall - Discover what a blocked task is doing.
  * @target:            thread to examine
- * @callno:            filled with system call number or -1
- * @args:              filled with @maxargs system call arguments
- * @maxargs:           number of elements in @args to fill
- * @sp:                        filled with user stack pointer
- * @pc:                        filled with user PC
+ * @info:              structure with the following fields:
+ *                      .sp        - filled with user stack pointer
+ *                      .data.nr   - filled with system call number or -1
+ *                      .data.args - filled with the system call arguments
+ *                      .data.instruction_pointer - filled with user PC
  *
- * If @target is blocked in a system call, returns zero with *@callno
- * set to the the call's number and @args filled in with its arguments.
- * Registers not used for system call arguments may not be available and
- * it is not kosher to use &struct user_regset calls while the system
+ * If @target is blocked in a system call, returns zero with @info.data.nr
+ * set to the call's number and @info.data.args filled in with its
+ * arguments. Registers not used for system call arguments may not be available
+ * and it is not kosher to use &struct user_regset calls while the system
  * call is still in progress.  Note we may get this result if @target
  * has finished its system call but not yet returned to user mode, such
  * as when it's stopped for signal handling or syscall exit tracing.
  *
  * If @target is blocked in the kernel during a fault or exception,
- * returns zero with *@callno set to -1 and does not fill in @args.
- * If so, it's now safe to examine @target using &struct user_regset
- * get() calls as long as we're sure @target won't return to user mode.
+ * returns zero with @info.data.nr set to -1 and does not fill in
+ * @info.data.args. If so, it's now safe to examine @target using
+ * &struct user_regset get() calls as long as we're sure @target won't return
+ * to user mode.
  *
  * Returns -%EAGAIN if @target does not remain blocked.
- *
- * Returns -%EINVAL if @maxargs is too large (maximum is six).
  */
-int task_current_syscall(struct task_struct *target, long *callno,
-                        unsigned long args[6], unsigned int maxargs,
-                        unsigned long *sp, unsigned long *pc)
+int task_current_syscall(struct task_struct *target, struct syscall_info *info)
 {
        long state;
        unsigned long ncsw;
 
-       if (unlikely(maxargs > 6))
-               return -EINVAL;
-
        if (target == current)
-               return collect_syscall(target, callno, args, maxargs, sp, pc);
+               return collect_syscall(target, info);
 
        state = target->state;
        if (unlikely(!state))
@@ -80,7 +73,7 @@ int task_current_syscall(struct task_struct *target, long *callno,
 
        ncsw = wait_task_inactive(target, state);
        if (unlikely(!ncsw) ||
-           unlikely(collect_syscall(target, callno, args, maxargs, sp, pc)) ||
+           unlikely(collect_syscall(target, info)) ||
            unlikely(wait_task_inactive(target, state) != ncsw))
                return -EAGAIN;
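
A caller now passes one struct instead of five output pointers (sketch; field types per this series):

        struct syscall_info info;

        if (task_current_syscall(task, &info) == 0 && info.data.nr != -1)
                pr_info("in syscall %d, arg0=%llx, sp=%llx\n",
                        info.data.nr, info.data.args[0], info.sp);
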
 
index 3bd2e91bfc297245c85713fe55637fc7b3b26961..084fe5a6ac57cd9bed1101f0e8acf16a4d521ce2 100644 (file)
@@ -500,7 +500,7 @@ static unsigned int __init print_ht(struct rhltable *rhlt)
                struct rhash_head *pos, *next;
                struct test_obj_rhl *p;
 
-               pos = rht_dereference(tbl->buckets[i], ht);
+               pos = rht_ptr_exclusive(tbl->buckets + i);
                next = !rht_is_a_nulls(pos) ? rht_dereference(pos->next, ht) : NULL;
 
                if (!rht_is_a_nulls(pos)) {
index f171a83707ced436bb2bd4508060a6cd45a95905..3319e0872d014628a6e505fc80d9daeb8d8a2b47 100644 (file)
@@ -242,6 +242,7 @@ __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
                                                        bool check_target)
 {
        struct page *page = pfn_to_online_page(pfn);
+       struct page *block_page;
        struct page *end_page;
        unsigned long block_pfn;
 
@@ -267,20 +268,26 @@ __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
            get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
                return false;
 
+       /* Ensure the start of the pageblock or zone is online and valid */
+       block_pfn = pageblock_start_pfn(pfn);
+       block_page = pfn_to_online_page(max(block_pfn, zone->zone_start_pfn));
+       if (block_page) {
+               page = block_page;
+               pfn = block_pfn;
+       }
+
+       /* Ensure the end of the pageblock or zone is online and valid */
+       block_pfn += pageblock_nr_pages;
+       block_pfn = min(block_pfn, zone_end_pfn(zone) - 1);
+       end_page = pfn_to_online_page(block_pfn);
+       if (!end_page)
+               return false;
+
        /*
         * Only clear the hint if a sample indicates there is either a
         * free page or an LRU page in the block. One or other condition
         * is necessary for the block to be a migration source/target.
         */
-       block_pfn = pageblock_start_pfn(pfn);
-       pfn = max(block_pfn, zone->zone_start_pfn);
-       page = pfn_to_page(pfn);
-       if (zone != page_zone(page))
-               return false;
-       pfn = block_pfn + pageblock_nr_pages;
-       pfn = min(pfn, zone_end_pfn(zone));
-       end_page = pfn_to_page(pfn);
-
        do {
                if (pfn_valid_within(pfn)) {
                        if (check_source && PageLRU(page)) {
@@ -309,7 +316,7 @@ __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
 static void __reset_isolation_suitable(struct zone *zone)
 {
        unsigned long migrate_pfn = zone->zone_start_pfn;
-       unsigned long free_pfn = zone_end_pfn(zone);
+       unsigned long free_pfn = zone_end_pfn(zone) - 1;
        unsigned long reset_migrate = free_pfn;
        unsigned long reset_free = migrate_pfn;
        bool source_set = false;
@@ -1363,7 +1370,7 @@ fast_isolate_freepages(struct compact_control *cc)
                                count_compact_events(COMPACTISOLATED, nr_isolated);
                        } else {
                                /* If isolation fails, abort the search */
-                               order = -1;
+                               order = cc->search_order + 1;
                                page = NULL;
                        }
                }
index c0b31b6c38773f37177d1a90a9e5ddd96653449c..eee9c221280c07c22eec9c33845ec2edf003faf1 100644 (file)
@@ -79,7 +79,7 @@ void __dump_page(struct page *page, const char *reason)
                pr_warn("ksm ");
        else if (mapping) {
                pr_warn("%ps ", mapping->a_ops);
-               if (mapping->host->i_dentry.first) {
+               if (mapping->host && mapping->host->i_dentry.first) {
                        struct dentry *dentry;
                        dentry = container_of(mapping->host->i_dentry.first, struct dentry, d_u.d_alias);
                        pr_warn("name:\"%pd\" ", dentry);
@@ -168,7 +168,7 @@ void dump_mm(const struct mm_struct *mm)
                mm_pgtables_bytes(mm),
                mm->map_count,
                mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
-               atomic64_read(&mm->pinned_vm),
+               (u64)atomic64_read(&mm->pinned_vm),
                mm->data_vm, mm->exec_vm, mm->stack_vm,
                mm->start_code, mm->end_code, mm->start_data, mm->end_data,
                mm->start_brk, mm->brk, mm->start_stack,
index f84e22685aaaaa7ff1167697af36a16960171a7d..91819b8ad9cc511ca15a3d84ff81131cd4e2d0da 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -160,8 +160,12 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
                goto retry;
        }
 
-       if (flags & FOLL_GET)
-               get_page(page);
+       if (flags & FOLL_GET) {
+               if (unlikely(!try_get_page(page))) {
+                       page = ERR_PTR(-ENOMEM);
+                       goto out;
+               }
+       }
        if (flags & FOLL_TOUCH) {
                if ((flags & FOLL_WRITE) &&
                    !pte_dirty(pte) && !PageDirty(page))
@@ -298,7 +302,10 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
                        if (pmd_trans_unstable(pmd))
                                ret = -EBUSY;
                } else {
-                       get_page(page);
+                       if (unlikely(!try_get_page(page))) {
+                               spin_unlock(ptl);
+                               return ERR_PTR(-ENOMEM);
+                       }
                        spin_unlock(ptl);
                        lock_page(page);
                        ret = split_huge_page(page);
@@ -500,7 +507,10 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address,
                if (is_device_public_page(*page))
                        goto unmap;
        }
-       get_page(*page);
+       if (unlikely(!try_get_page(*page))) {
+               ret = -ENOMEM;
+               goto unmap;
+       }
 out:
        ret = 0;
 unmap:
@@ -1545,6 +1555,20 @@ static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages)
        }
 }
 
+/*
+ * Return the compound head page with ref appropriately incremented,
+ * or NULL if that failed.
+ */
+static inline struct page *try_get_compound_head(struct page *page, int refs)
+{
+       struct page *head = compound_head(page);
+       if (WARN_ON_ONCE(page_ref_count(head) < 0))
+               return NULL;
+       if (unlikely(!page_cache_add_speculative(head, refs)))
+               return NULL;
+       return head;
+}
+
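
The helper centralises the refcount-overflow defence: a negative page_ref_count() means the counter was pushed past 2^31 (or the page is mid-free), so the reference is refused rather than wrapped. The fast-path callers below all follow the same pattern:

        head = try_get_compound_head(page, refs);
        if (!head) {
                *nr -= refs;    /* undo tentative accounting and bail */
                return 0;
        }
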
 #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
 static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
                         int write, struct page **pages, int *nr)
@@ -1579,9 +1603,9 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
 
                VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
                page = pte_page(pte);
-               head = compound_head(page);
 
-               if (!page_cache_get_speculative(head))
+               head = try_get_compound_head(page, 1);
+               if (!head)
                        goto pte_unmap;
 
                if (unlikely(pte_val(pte) != pte_val(*ptep))) {
@@ -1720,8 +1744,8 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
                refs++;
        } while (addr += PAGE_SIZE, addr != end);
 
-       head = compound_head(pmd_page(orig));
-       if (!page_cache_add_speculative(head, refs)) {
+       head = try_get_compound_head(pmd_page(orig), refs);
+       if (!head) {
                *nr -= refs;
                return 0;
        }
@@ -1758,8 +1782,8 @@ static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
                refs++;
        } while (addr += PAGE_SIZE, addr != end);
 
-       head = compound_head(pud_page(orig));
-       if (!page_cache_add_speculative(head, refs)) {
+       head = try_get_compound_head(pud_page(orig), refs);
+       if (!head) {
                *nr -= refs;
                return 0;
        }
@@ -1795,8 +1819,8 @@ static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
                refs++;
        } while (addr += PAGE_SIZE, addr != end);
 
-       head = compound_head(pgd_page(orig));
-       if (!page_cache_add_speculative(head, refs)) {
+       head = try_get_compound_head(pgd_page(orig), refs);
+       if (!head) {
                *nr -= refs;
                return 0;
        }
index 404acdcd0455d0d3dda191d994dfb27d0359104e..165ea46bf14926a4ae1ee664631475e0150f185f 100644 (file)
@@ -755,6 +755,21 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
        spinlock_t *ptl;
 
        ptl = pmd_lock(mm, pmd);
+       if (!pmd_none(*pmd)) {
+               if (write) {
+                       if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
+                               WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
+                               goto out_unlock;
+                       }
+                       entry = pmd_mkyoung(*pmd);
+                       entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
+                       if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
+                               update_mmu_cache_pmd(vma, addr, pmd);
+               }
+
+               goto out_unlock;
+       }
+
        entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
        if (pfn_t_devmap(pfn))
                entry = pmd_mkdevmap(entry);
@@ -766,11 +781,16 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
        if (pgtable) {
                pgtable_trans_huge_deposit(mm, pmd, pgtable);
                mm_inc_nr_ptes(mm);
+               pgtable = NULL;
        }
 
        set_pmd_at(mm, addr, pmd, entry);
        update_mmu_cache_pmd(vma, addr, pmd);
+
+out_unlock:
        spin_unlock(ptl);
+       if (pgtable)
+               pte_free(mm, pgtable);
 }
 
 vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
@@ -821,6 +841,20 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
        spinlock_t *ptl;
 
        ptl = pud_lock(mm, pud);
+       if (!pud_none(*pud)) {
+               if (write) {
+                       if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) {
+                               WARN_ON_ONCE(!is_huge_zero_pud(*pud));
+                               goto out_unlock;
+                       }
+                       entry = pud_mkyoung(*pud);
+                       entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
+                       if (pudp_set_access_flags(vma, addr, pud, entry, 1))
+                               update_mmu_cache_pud(vma, addr, pud);
+               }
+               goto out_unlock;
+       }
+
        entry = pud_mkhuge(pfn_t_pud(pfn, prot));
        if (pfn_t_devmap(pfn))
                entry = pud_mkdevmap(entry);
@@ -830,6 +864,8 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
        }
        set_pud_at(mm, addr, pud, entry);
        update_mmu_cache_pud(vma, addr, pud);
+
+out_unlock:
        spin_unlock(ptl);
 }
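
The !pmd_none()/!pud_none() checks make insert_pfn_pmd() and insert_pfn_pud() tolerate a parallel fault that installed the entry first: a write fault on an already-present PFN only upgrades the access flags, and a pre-allocated page table that lost the race is freed after the lock drops. A minimal sketch of that allocate-unlocked, publish-or-free shape, with pthreads standing in for the page-table lock (toy types, not the kernel's):

#include <pthread.h>
#include <stdlib.h>

struct table {
    pthread_mutex_t lock;
    void *slots[16];
};

/* Allocate outside the lock, publish under it, and free the buffer
 * only if another thread won the race: the same shape as
 * insert_pfn_pmd() deferring pte_free() until after spin_unlock(). */
static void install(struct table *t, int slot)
{
    void *fresh = malloc(64);           /* optimistic, done unlocked */

    if (!fresh)
        return;

    pthread_mutex_lock(&t->lock);
    if (!t->slots[slot]) {
        t->slots[slot] = fresh;         /* we won: publish */
        fresh = NULL;
    }
    pthread_mutex_unlock(&t->lock);

    free(fresh);                        /* NULL when published */
}
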
 
index 97b1e0290c66d48737cda50ccea6bbcc1782c8fc..6cdc7b2d910039a5e9f4fb4724c34ad8e2216c45 100644 (file)
@@ -4299,6 +4299,19 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
                pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
                page = pte_page(huge_ptep_get(pte));
+
+               /*
+                * Instead of doing 'try_get_page()' below in the same_page
+                * loop, just check the count once here.
+                */
+               if (unlikely(page_count(page) <= 0)) {
+                       if (pages) {
+                               spin_unlock(ptl);
+                               remainder = 0;
+                               err = -ENOMEM;
+                               break;
+                       }
+               }
 same_page:
                if (pages) {
                        pages[i] = mem_map_offset(page, pfn_offset);
index 3e0c11f7d7a1ef4125d355be6096d357efb8d9d9..3ce956efa0cb804cfd964bbc725857b7aed5d7f1 100644 (file)
@@ -163,7 +163,10 @@ static inline u8 random_tag(void)
 #endif
 
 #ifndef arch_kasan_set_tag
-#define arch_kasan_set_tag(addr, tag)  ((void *)(addr))
+static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
+{
+       return addr;
+}
 #endif
 #ifndef arch_kasan_reset_tag
 #define arch_kasan_reset_tag(addr)     ((void *)(addr))
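
Turning the arch_kasan_set_tag() fallback from a macro into a static inline gives the no-op path the same prototype as an arch-provided override, so callers get type checking of both arguments on every configuration. The idiom in isolation, as a sketch with a hypothetical hook name:

/* An arch header may provide a real implementation and define the
 * guard; otherwise this typed no-op stands in. Unlike a macro, the
 * fallback still checks that callers pass (pointer, tag byte). */
#ifndef arch_set_tag
static inline const void *arch_set_tag(const void *addr, unsigned char tag)
{
    (void)tag;      /* unused in the fallback, but type-checked */
    return addr;
}
#endif
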
index 707fa5579f66f1e1e96a5613e50ff74b92417954..6c318f5ac234f40237aec9a0bca5c9e7791cd5dc 100644 (file)
@@ -1529,11 +1529,6 @@ static void kmemleak_scan(void)
        }
        rcu_read_unlock();
 
-       /* data/bss scanning */
-       scan_large_block(_sdata, _edata);
-       scan_large_block(__bss_start, __bss_stop);
-       scan_large_block(__start_ro_after_init, __end_ro_after_init);
-
 #ifdef CONFIG_SMP
        /* per-cpu sections scanning */
        for_each_possible_cpu(i)
@@ -2071,6 +2066,17 @@ void __init kmemleak_init(void)
        }
        local_irq_restore(flags);
 
+       /* register the data/bss sections */
+       create_object((unsigned long)_sdata, _edata - _sdata,
+                     KMEMLEAK_GREY, GFP_ATOMIC);
+       create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
+                     KMEMLEAK_GREY, GFP_ATOMIC);
+       /* only register .data..ro_after_init if not within .data */
+       if (__start_ro_after_init < _sdata || __end_ro_after_init > _edata)
+               create_object((unsigned long)__start_ro_after_init,
+                             __end_ro_after_init - __start_ro_after_init,
+                             KMEMLEAK_GREY, GFP_ATOMIC);
+
        /*
         * This is the point where tracking allocations is safe. Automatic
         * scanning is started during the late initcall. Add the early logged
index 532e0e2a4817e36d9106634c85a95136790f5227..81a0d3914ec999efcb36fb590e75c29d059d2b24 100644 (file)
@@ -3882,6 +3882,22 @@ struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
        return &memcg->cgwb_domain;
 }
 
+/*
+ * idx can be of type enum memcg_stat_item or node_stat_item.
+ * Keep in sync with memcg_exact_page().
+ */
+static unsigned long memcg_exact_page_state(struct mem_cgroup *memcg, int idx)
+{
+       long x = atomic_long_read(&memcg->stat[idx]);
+       int cpu;
+
+       for_each_online_cpu(cpu)
+               x += per_cpu_ptr(memcg->stat_cpu, cpu)->count[idx];
+       if (x < 0)
+               x = 0;
+       return x;
+}
+
 /**
  * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
  * @wb: bdi_writeback in question
@@ -3907,10 +3923,10 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
        struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
        struct mem_cgroup *parent;
 
-       *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
+       *pdirty = memcg_exact_page_state(memcg, NR_FILE_DIRTY);
 
        /* this should eventually include NR_UNSTABLE_NFS */
-       *pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
+       *pwriteback = memcg_exact_page_state(memcg, NR_WRITEBACK);
        *pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) |
                                                     (1 << LRU_ACTIVE_FILE));
        *pheadroom = PAGE_COUNTER_MAX;
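
memcg_exact_page_state() trades speed for accuracy: instead of relying on the batched atomic value alone, it folds every CPU's unflushed delta into the total at read time and clamps a transiently negative sum to zero, which is what the writeback path now wants for NR_FILE_DIRTY and NR_WRITEBACK. A hedged userspace analog (fixed CPU count, toy names):

#include <stdatomic.h>

#define NCPU 4

static atomic_long total;           /* batched, eventually consistent */
static long percpu_delta[NCPU];     /* not yet folded into total */

/* Exact read: fold in every per-CPU delta at read time. Concurrent
 * updates can make the sum dip below zero, so clamp as the kernel does. */
static long exact_read(void)
{
    long x = atomic_load(&total);

    for (int cpu = 0; cpu < NCPU; cpu++)
        x += percpu_delta[cpu];

    return x < 0 ? 0 : x;
}
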
index 47fe250307c7aa0f553454af6f459343819f8770..ab650c21bccd5450673470f845675096b09010d9 100644 (file)
@@ -1549,10 +1549,12 @@ static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
                                WARN_ON_ONCE(!is_zero_pfn(pte_pfn(*pte)));
                                goto out_unlock;
                        }
-                       entry = *pte;
-                       goto out_mkwrite;
-               } else
-                       goto out_unlock;
+                       entry = pte_mkyoung(*pte);
+                       entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+                       if (ptep_set_access_flags(vma, addr, pte, entry, 1))
+                               update_mmu_cache(vma, addr, pte);
+               }
+               goto out_unlock;
        }
 
        /* Ok, finally just insert the thing.. */
@@ -1561,7 +1563,6 @@ static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
        else
                entry = pte_mkspecial(pfn_t_pte(pfn, prot));
 
-out_mkwrite:
        if (mkwrite) {
                entry = pte_mkyoung(entry);
                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
index f767582af4f8c0f28102f2d77d8dc6a667ec3df5..0082d699be94b4c28e1820351916568e68a684bb 100644 (file)
@@ -1576,7 +1576,7 @@ static int __ref __offline_pages(unsigned long start_pfn,
 {
        unsigned long pfn, nr_pages;
        long offlined_pages;
-       int ret, node;
+       int ret, node, nr_isolate_pageblock;
        unsigned long flags;
        unsigned long valid_start, valid_end;
        struct zone *zone;
@@ -1602,10 +1602,11 @@ static int __ref __offline_pages(unsigned long start_pfn,
        ret = start_isolate_page_range(start_pfn, end_pfn,
                                       MIGRATE_MOVABLE,
                                       SKIP_HWPOISON | REPORT_FAILURE);
-       if (ret) {
+       if (ret < 0) {
                reason = "failure to isolate range";
                goto failed_removal;
        }
+       nr_isolate_pageblock = ret;
 
        arg.start_pfn = start_pfn;
        arg.nr_pages = nr_pages;
@@ -1657,8 +1658,16 @@ static int __ref __offline_pages(unsigned long start_pfn,
        /* Ok, all of our target is isolated.
           We cannot do rollback at this point. */
        offline_isolated_pages(start_pfn, end_pfn);
-       /* reset pagetype flags and makes migrate type to be MOVABLE */
-       undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
+
+       /*
+        * Onlining will reset pagetype flags and makes migrate type
+        * MOVABLE, so just need to decrease the number of isolated
+        * pageblocks zone counter here.
+        */
+       spin_lock_irqsave(&zone->lock, flags);
+       zone->nr_isolate_pageblock -= nr_isolate_pageblock;
+       spin_unlock_irqrestore(&zone->lock, flags);
+
        /* removal success */
        adjust_managed_page_count(pfn_to_page(start_pfn), -offlined_pages);
        zone->present_pages -= offlined_pages;
@@ -1690,12 +1699,12 @@ static int __ref __offline_pages(unsigned long start_pfn,
 
 failed_removal_isolated:
        undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
+       memory_notify(MEM_CANCEL_OFFLINE, &arg);
 failed_removal:
        pr_debug("memory offlining [mem %#010llx-%#010llx] failed due to %s\n",
                 (unsigned long long) start_pfn << PAGE_SHIFT,
                 ((unsigned long long) end_pfn << PAGE_SHIFT) - 1,
                 reason);
-       memory_notify(MEM_CANCEL_OFFLINE, &arg);
        /* pushback to free area */
        mem_hotplug_done();
        return ret;
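
start_isolate_page_range() switching from 0-on-success to a pageblock count means its callers must now test `ret < 0`, as the hunks above and the alloc_contig_range() change below do; treating any nonzero return as failure would reject every successful isolation. The convention in miniature (illustrative functions only):

#include <errno.h>

/* Count-or-errno convention: >= 0 is a pageblock count, < 0 is -EXXX. */
static int isolate_range(int busy)
{
    return busy ? -EBUSY : 3;       /* e.g. three pageblocks isolated */
}

static int caller(int busy)
{
    int nr = isolate_range(busy);

    if (nr < 0)                     /* not "if (nr)": a count is success */
        return nr;
    /* ... on teardown, undo exactly nr pageblocks ... */
    return 0;
}
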
index af171ccb56a29713a326b1018e38215700ffcfe5..2219e747df494e5799d5e1af97d6b456907bfa43 100644 (file)
@@ -428,6 +428,13 @@ static inline bool queue_pages_required(struct page *page,
        return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
 }
 
+/*
+ * queue_pages_pmd() has three possible return values:
+ * 1 - pages are placed on the right node or queued successfully.
+ * 0 - THP was split.
+ * -EIO - a migration entry was encountered, or MPOL_MF_STRICT was specified
+ *        and an existing page was already on a node that does not follow
+ *        the policy.
+ */
 static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
                                unsigned long end, struct mm_walk *walk)
 {
@@ -437,7 +444,7 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
        unsigned long flags;
 
        if (unlikely(is_pmd_migration_entry(*pmd))) {
-               ret = 1;
+               ret = -EIO;
                goto unlock;
        }
        page = pmd_page(*pmd);
@@ -454,8 +461,15 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
        ret = 1;
        flags = qp->flags;
        /* go to thp migration */
-       if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
+       if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
+               if (!vma_migratable(walk->vma)) {
+                       ret = -EIO;
+                       goto unlock;
+               }
+
                migrate_page_add(page, qp->pagelist, flags);
+       } else
+               ret = -EIO;
 unlock:
        spin_unlock(ptl);
 out:
@@ -480,8 +494,10 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
        ptl = pmd_trans_huge_lock(pmd, vma);
        if (ptl) {
                ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
-               if (ret)
+               if (ret > 0)
                        return 0;
+               else if (ret < 0)
+                       return ret;
        }
 
        if (pmd_trans_unstable(pmd))
@@ -502,11 +518,16 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
                        continue;
                if (!queue_pages_required(page, qp))
                        continue;
-               migrate_page_add(page, qp->pagelist, flags);
+               if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
+                       if (!vma_migratable(vma))
+                               break;
+                       migrate_page_add(page, qp->pagelist, flags);
+               } else
+                       break;
        }
        pte_unmap_unlock(pte - 1, ptl);
        cond_resched();
-       return 0;
+       return addr != end ? -EIO : 0;
 }
 
 static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
@@ -576,7 +597,12 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
        unsigned long endvma = vma->vm_end;
        unsigned long flags = qp->flags;
 
-       if (!vma_migratable(vma))
+       /*
+        * We need to check MPOL_MF_STRICT so that -EIO can be returned
+        * when required, regardless of vma_migratable()
+        */
+       if (!vma_migratable(vma) &&
+           !(flags & MPOL_MF_STRICT))
                return 1;
 
        if (endvma > end)
@@ -603,7 +629,7 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
        }
 
        /* queue pages from current vma */
-       if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
+       if (flags & MPOL_MF_VALID)
                return 0;
        return 1;
 }
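
The tri-state contract documented for queue_pages_pmd() (positive = queued, zero = THP split, negative = hard error) is what lets queue_pages_pte_range() both short-circuit on success and propagate -EIO. A sketch of the caller-side dispatch (hypothetical names):

/* Tri-state dispatch, mirroring queue_pages_pmd()'s contract:
 * > 0 means the huge entry was handled, 0 means it was split and the
 * individual PTEs must be scanned, < 0 aborts the whole walk. */
static int walk_pmd(int pmd_ret, int (*scan_ptes)(void))
{
    if (pmd_ret > 0)
        return 0;           /* fully handled at PMD granularity */
    if (pmd_ret < 0)
        return pmd_ret;     /* -EIO etc.: propagate */
    return scan_ptes();     /* THP was split: fall back to PTEs */
}
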
index ac6f4939bb5975a2cab6e78419562c1529e12219..663a5449367a4204e937491d2d9032b0a3768bdf 100644 (file)
@@ -248,10 +248,8 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
                                pte = swp_entry_to_pte(entry);
                        } else if (is_device_public_page(new)) {
                                pte = pte_mkdevmap(pte);
-                               flush_dcache_page(new);
                        }
-               } else
-                       flush_dcache_page(new);
+               }
 
 #ifdef CONFIG_HUGETLB_PAGE
                if (PageHuge(new)) {
@@ -995,6 +993,13 @@ static int move_to_new_page(struct page *newpage, struct page *page,
                 */
                if (!PageMappingFlags(page))
                        page->mapping = NULL;
+
+               if (unlikely(is_zone_device_page(newpage))) {
+                       if (is_device_public_page(newpage))
+                               flush_dcache_page(newpage);
+               } else
+                       flush_dcache_page(newpage);
+
        }
 out:
        return rc;
index 03fcf73d47dabde0987f3542c3c87fca33bf5a5d..d96ca5bc555bbc432e135c876151e0699ee88162 100644 (file)
@@ -8233,7 +8233,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 
        ret = start_isolate_page_range(pfn_max_align_down(start),
                                       pfn_max_align_up(end), migratetype, 0);
-       if (ret)
+       if (ret < 0)
                return ret;
 
        /*
index ce323e56b34d6bc43a9e16cc9054f35290bb0b63..019280712e1b8b7e075b51573b5c56d07aef3922 100644 (file)
@@ -59,7 +59,8 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_
         * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
         * We just check MOVABLE pages.
         */
-       if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype, flags))
+       if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype,
+                                isol_flags))
                ret = 0;
 
        /*
@@ -160,27 +161,36 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
        return NULL;
 }
 
-/*
- * start_isolate_page_range() -- make page-allocation-type of range of pages
- * to be MIGRATE_ISOLATE.
- * @start_pfn: The lower PFN of the range to be isolated.
- * @end_pfn: The upper PFN of the range to be isolated.
- * @migratetype: migrate type to set in error recovery.
+/**
+ * start_isolate_page_range() - make the page-allocation-type of a range of
+ * pages MIGRATE_ISOLATE.
+ * @start_pfn:         The lower PFN of the range to be isolated.
+ * @end_pfn:           The upper PFN of the range to be isolated.
+ *                     start_pfn/end_pfn must be aligned to pageblock_order.
+ * @migratetype:       Migrate type to set in error recovery.
+ * @flags:             The following flags are allowed (they can be combined in
+ *                     a bit mask)
+ *                     SKIP_HWPOISON - ignore hwpoison pages
+ *                     REPORT_FAILURE - report details about the failure to
+ *                     isolate the range
  *
  * Making page-allocation-type to be MIGRATE_ISOLATE means free pages in
  * the range will never be allocated. Any free pages and pages freed in the
- * future will not be allocated again.
- *
- * start_pfn/end_pfn must be aligned to pageblock_order.
- * Return 0 on success and -EBUSY if any part of range cannot be isolated.
+ * future will not be allocated again. If the specified range includes migrate
+ * types other than MOVABLE or CMA, this will fail with -EBUSY. To finally
+ * isolate all pages in the range, the caller has to free them all;
+ * test_pages_isolated() can be used to verify that it worked.
  *
  * There is no high level synchronization mechanism that prevents two threads
- * from trying to isolate overlapping ranges.  If this happens, one thread
+ * from trying to isolate overlapping ranges. If this happens, one thread
  * will notice pageblocks in the overlapping range already set to isolate.
  * This happens in set_migratetype_isolate, and set_migratetype_isolate
- * returns an error.  We then clean up by restoring the migration type on
- * pageblocks we may have modified and return -EBUSY to caller.  This
+ * returns an error. We then clean up by restoring the migration type on
+ * pageblocks we may have modified and return -EBUSY to caller. This
  * prevents two threads from simultaneously working on overlapping ranges.
+ *
+ * Return: the number of isolated pageblocks on success and -EBUSY if any part
+ * of the range cannot be isolated.
  */
 int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
                             unsigned migratetype, int flags)
@@ -188,6 +198,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
        unsigned long pfn;
        unsigned long undo_pfn;
        struct page *page;
+       int nr_isolate_pageblock = 0;
 
        BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
        BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));
@@ -196,13 +207,15 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
             pfn < end_pfn;
             pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
-               if (page &&
-                   set_migratetype_isolate(page, migratetype, flags)) {
-                       undo_pfn = pfn;
-                       goto undo;
+               if (page) {
+                       if (set_migratetype_isolate(page, migratetype, flags)) {
+                               undo_pfn = pfn;
+                               goto undo;
+                       }
+                       nr_isolate_pageblock++;
                }
        }
-       return 0;
+       return nr_isolate_pageblock;
 undo:
        for (pfn = start_pfn;
             pfn < undo_pfn;
index 28652e4218e0c1e5da82e2094d5ec43046e9b472..47a380a486eefdfb2c93434c920dcecb53e272b6 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2115,6 +2115,8 @@ int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags)
        cachep->allocflags = __GFP_COMP;
        if (flags & SLAB_CACHE_DMA)
                cachep->allocflags |= GFP_DMA;
+       if (flags & SLAB_CACHE_DMA32)
+               cachep->allocflags |= GFP_DMA32;
        if (flags & SLAB_RECLAIM_ACCOUNT)
                cachep->allocflags |= __GFP_RECLAIMABLE;
        cachep->size = size;
@@ -4306,7 +4308,8 @@ static void show_symbol(struct seq_file *m, unsigned long address)
 
 static int leaks_show(struct seq_file *m, void *p)
 {
-       struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
+       struct kmem_cache *cachep = list_entry(p, struct kmem_cache,
+                                              root_caches_node);
        struct page *page;
        struct kmem_cache_node *n;
        const char *name;
index e5e6658eeacca81c694ccef400d19bbcd138d6ab..43ac818b8592bc472b4b67e19831b404cc798aca 100644 (file)
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -127,7 +127,8 @@ static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
 
 
 /* Legal flag mask for kmem_cache_create(), for various configurations */
-#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
+#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
+                        SLAB_CACHE_DMA32 | SLAB_PANIC | \
                         SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS )
 
 #if defined(CONFIG_DEBUG_SLAB)
index 03eeb8b7b4b1d5d9fc0a395459478c79ad8a2656..58251ba63e4a19fb9262c6adb59831075a858dd5 100644 (file)
@@ -53,7 +53,7 @@ static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
                SLAB_FAILSLAB | SLAB_KASAN)
 
 #define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
-                        SLAB_ACCOUNT)
+                        SLAB_CACHE_DMA32 | SLAB_ACCOUNT)
 
 /*
  * Merge control. If this is set then no merging of slab caches will occur.
index 1b08fbcb7e61fbcc5fa84738dc09e88050d2bd2b..d30ede89f4a6499a07e69baf981b755d0a1b4400 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3589,6 +3589,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
        if (s->flags & SLAB_CACHE_DMA)
                s->allocflags |= GFP_DMA;
 
+       if (s->flags & SLAB_CACHE_DMA32)
+               s->allocflags |= GFP_DMA32;
+
        if (s->flags & SLAB_RECLAIM_ACCOUNT)
                s->allocflags |= __GFP_RECLAIMABLE;
 
@@ -5679,6 +5682,8 @@ static char *create_unique_id(struct kmem_cache *s)
         */
        if (s->flags & SLAB_CACHE_DMA)
                *p++ = 'd';
+       if (s->flags & SLAB_CACHE_DMA32)
+               *p++ = 'D';
        if (s->flags & SLAB_RECLAIM_ACCOUNT)
                *p++ = 'a';
        if (s->flags & SLAB_CONSISTENCY_CHECKS)
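
With SLAB_CACHE_DMA32 accepted by both slab and slub and added to the legal and merge masks, a cache can now be backed by ZONE_DMA32 pages. A hedged usage sketch; the cache name and sizes are invented:

#include <linux/init.h>
#include <linux/slab.h>

/* A hypothetical cache for descriptors that a device capable of only
 * 32-bit DMA must be able to address. */
static struct kmem_cache *desc_cache;

static int __init desc_cache_init(void)
{
    desc_cache = kmem_cache_create("example_desc32", 256, 64,
                                   SLAB_CACHE_DMA32, NULL);
    return desc_cache ? 0 : -ENOMEM;
}
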
index 69904aa6165bf13b89a44d6abf84609fea2076ba..56e057c432f9663439c4cfd38f8cad8f2e6e2c3d 100644 (file)
@@ -567,7 +567,7 @@ void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
 }
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
-/* Mark all memory sections within the pfn range as online */
+/* Mark all memory sections within the pfn range as offline */
 void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
 {
        unsigned long pfn;
index d559bde497a9b9690f328f6f5deea3eb17de474b..43a2984bccaab6525afb6e7c61ce8d903f45f24e 100644 (file)
--- a/mm/util.c
+++ b/mm/util.c
@@ -204,7 +204,7 @@ EXPORT_SYMBOL(vmemdup_user);
  * @s: The string to duplicate
  * @n: Maximum number of bytes to copy, including the trailing NUL.
  *
- * Return: newly allocated copy of @s or %NULL in case of error
+ * Return: newly allocated copy of @s or an ERR_PTR() in case of error
  */
 char *strndup_user(const char __user *s, long n)
 {
index 15293c2a5dd821d39233c177d540e4db7407b110..8d77b6ee4477df71bc466c057338ea4ae62dcd88 100644 (file)
@@ -443,27 +443,29 @@ static int vlan_dev_fcoe_disable(struct net_device *dev)
        return rc;
 }
 
-static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
+static int vlan_dev_fcoe_ddp_target(struct net_device *dev, u16 xid,
+                                   struct scatterlist *sgl, unsigned int sgc)
 {
        struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
        const struct net_device_ops *ops = real_dev->netdev_ops;
-       int rc = -EINVAL;
+       int rc = 0;
+
+       if (ops->ndo_fcoe_ddp_target)
+               rc = ops->ndo_fcoe_ddp_target(real_dev, xid, sgl, sgc);
 
-       if (ops->ndo_fcoe_get_wwn)
-               rc = ops->ndo_fcoe_get_wwn(real_dev, wwn, type);
        return rc;
 }
+#endif
 
-static int vlan_dev_fcoe_ddp_target(struct net_device *dev, u16 xid,
-                                   struct scatterlist *sgl, unsigned int sgc)
+#ifdef NETDEV_FCOE_WWNN
+static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
 {
        struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
        const struct net_device_ops *ops = real_dev->netdev_ops;
-       int rc = 0;
-
-       if (ops->ndo_fcoe_ddp_target)
-               rc = ops->ndo_fcoe_ddp_target(real_dev, xid, sgl, sgc);
+       int rc = -EINVAL;
 
+       if (ops->ndo_fcoe_get_wwn)
+               rc = ops->ndo_fcoe_get_wwn(real_dev, wwn, type);
        return rc;
 }
 #endif
@@ -794,9 +796,11 @@ static const struct net_device_ops vlan_netdev_ops = {
        .ndo_fcoe_ddp_done      = vlan_dev_fcoe_ddp_done,
        .ndo_fcoe_enable        = vlan_dev_fcoe_enable,
        .ndo_fcoe_disable       = vlan_dev_fcoe_disable,
-       .ndo_fcoe_get_wwn       = vlan_dev_fcoe_get_wwn,
        .ndo_fcoe_ddp_target    = vlan_dev_fcoe_ddp_target,
 #endif
+#ifdef NETDEV_FCOE_WWNN
+       .ndo_fcoe_get_wwn       = vlan_dev_fcoe_get_wwn,
+#endif
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = vlan_dev_poll_controller,
        .ndo_netpoll_setup      = vlan_dev_netpoll_setup,
index d795b9c5aea4a4e35021d9db2e10254036df55fe..b9e67e589a7b43eeda8268f329adf607e167e80f 100644 (file)
@@ -345,8 +345,8 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
                return NETDEV_TX_OK;
        }
        rt = (struct rtable *) dst;
-       if (rt->rt_gateway)
-               daddr = &rt->rt_gateway;
+       if (rt->rt_gw_family == AF_INET)
+               daddr = &rt->rt_gw4;
        else
                daddr = &ip_hdr(skb)->daddr;
        n = dst_neigh_lookup(dst, daddr);
index d7f5cf5b7594d0ea4e766e06fbc07e6fce590e3b..ad4f829193f053c8a0c0846f1e9f619617dcd18e 100644 (file)
@@ -710,7 +710,10 @@ static int lec_vcc_attach(struct atm_vcc *vcc, void __user *arg)
 
 static int lec_mcast_attach(struct atm_vcc *vcc, int arg)
 {
-       if (arg < 0 || arg >= MAX_LEC_ITF || !dev_lec[arg])
+       if (arg < 0 || arg >= MAX_LEC_ITF)
+               return -EINVAL;
+       arg = array_index_nospec(arg, MAX_LEC_ITF);
+       if (!dev_lec[arg])
                return -EINVAL;
        vcc->proto_data = dev_lec[arg];
        return lec_mcast_make(netdev_priv(dev_lec[arg]), vcc);
@@ -728,6 +731,7 @@ static int lecd_attach(struct atm_vcc *vcc, int arg)
                i = arg;
        if (arg >= MAX_LEC_ITF)
                return -EINVAL;
+       i = array_index_nospec(arg, MAX_LEC_ITF);
        if (!dev_lec[i]) {
                int size;
 
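Both lec ioctl paths now clamp the user-controlled interface index after the bounds check, so a mispredicted branch cannot speculatively index dev_lec[] out of range (the classic Spectre v1 gadget). The masking idea behind array_index_nospec(), reduced to a userspace sketch (the kernel also has arch-specific barrier-based variants):

#include <stddef.h>

/* Branch-free clamp: the mask is all-ones when idx < size and
 * all-zeroes otherwise, so even a mispredicted bounds check cannot
 * steer a speculative out-of-range load. Relies on arithmetic right
 * shift, as the kernel's generic implementation does. */
static size_t index_mask(size_t idx, size_t size)
{
    return ~(long)(idx | (size - 1 - idx)) >> (sizeof(long) * 8 - 1);
}

static size_t clamp_index(size_t idx, size_t size)
{
    return idx & index_mask(idx, size);
}
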
index 13b9ab860a25cd9cda89d710a57a3c12fdb92006..2614a9caee008539cc489b71dabdc36ac0ae3752 100644 (file)
@@ -92,8 +92,10 @@ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh)
 
                ret = cfg80211_get_station(real_netdev, neigh->addr, &sinfo);
 
-               /* free the TID stats immediately */
-               cfg80211_sinfo_release_content(&sinfo);
+               if (!ret) {
+                       /* free the TID stats immediately */
+                       cfg80211_sinfo_release_content(&sinfo);
+               }
 
                dev_put(real_netdev);
                if (ret == -ENOENT) {
index 8d6b7c9c2a7e20b1abb715167339c383ca588e27..663a53b6d36e65508b4296d86cecdd808c653836 100644 (file)
@@ -790,6 +790,8 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
                                 const u8 *mac, const unsigned short vid)
 {
        struct batadv_bla_claim search_claim, *claim;
+       struct batadv_bla_claim *claim_removed_entry;
+       struct hlist_node *claim_removed_node;
 
        ether_addr_copy(search_claim.addr, mac);
        search_claim.vid = vid;
@@ -800,10 +802,18 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
        batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): %pM, vid %d\n", __func__,
                   mac, batadv_print_vid(vid));
 
-       batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim,
-                          batadv_choose_claim, claim);
-       batadv_claim_put(claim); /* reference from the hash is gone */
+       claim_removed_node = batadv_hash_remove(bat_priv->bla.claim_hash,
+                                               batadv_compare_claim,
+                                               batadv_choose_claim, claim);
+       if (!claim_removed_node)
+               goto free_claim;
 
+       /* reference from the hash is gone */
+       claim_removed_entry = hlist_entry(claim_removed_node,
+                                         struct batadv_bla_claim, hash_entry);
+       batadv_claim_put(claim_removed_entry);
+
+free_claim:
        /* don't need the reference from hash_find() anymore */
        batadv_claim_put(claim);
 }
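
This hunk, like the translation-table fixes below, makes the remover honor batadv_hash_remove()'s return value: only the caller that actually unlinked the entry may drop the hash's reference, which closes a double-put window when two removers race. The shape of the fix, reduced to a single-slot toy (no locking here, purely illustrative; the kernel side runs under the bucket spinlock):

#include <stdlib.h>

struct node { int refs; };

/* Toy single-slot "hash": remove hands the node only to the caller
 * that actually unlinked it. */
static struct node *slot;

static struct node *hash_remove(void)
{
    struct node *n = slot;
    slot = NULL;
    return n;                 /* NULL if a racing remover got here first */
}

static void node_put(struct node *n)
{
    if (n && --n->refs == 0)
        free(n);
}

static void del_entry(struct node *lookup_ref)
{
    struct node *removed = hash_remove();

    if (removed)
        node_put(removed);    /* drop the reference held by the table */
    node_put(lookup_ref);     /* drop the reference from our lookup */
}
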
index ad14c8086fe7792cb749858adeae2804016eb92b..80fc3253c3368e3cc356176c5ba961542de0a8c9 100644 (file)
@@ -1130,9 +1130,9 @@ static ssize_t batadv_store_throughput_override(struct kobject *kobj,
                                                struct attribute *attr,
                                                char *buff, size_t count)
 {
-       struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
        struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
        struct batadv_hard_iface *hard_iface;
+       struct batadv_priv *bat_priv;
        u32 tp_override;
        u32 old_tp_override;
        bool ret;
@@ -1163,7 +1163,10 @@ static ssize_t batadv_store_throughput_override(struct kobject *kobj,
 
        atomic_set(&hard_iface->bat_v.throughput_override, tp_override);
 
-       batadv_netlink_notify_hardif(bat_priv, hard_iface);
+       if (hard_iface->soft_iface) {
+               bat_priv = netdev_priv(hard_iface->soft_iface);
+               batadv_netlink_notify_hardif(bat_priv, hard_iface);
+       }
 
 out:
        batadv_hardif_put(hard_iface);
index 5d8bf8048e4e0437e873faa62b716b0b9612c029..1ddfd5e011eecb807d7bb9192ff012a8ed1f923e 100644 (file)
@@ -603,14 +603,26 @@ static void batadv_tt_global_free(struct batadv_priv *bat_priv,
                                  struct batadv_tt_global_entry *tt_global,
                                  const char *message)
 {
+       struct batadv_tt_global_entry *tt_removed_entry;
+       struct hlist_node *tt_removed_node;
+
        batadv_dbg(BATADV_DBG_TT, bat_priv,
                   "Deleting global tt entry %pM (vid: %d): %s\n",
                   tt_global->common.addr,
                   batadv_print_vid(tt_global->common.vid), message);
 
-       batadv_hash_remove(bat_priv->tt.global_hash, batadv_compare_tt,
-                          batadv_choose_tt, &tt_global->common);
-       batadv_tt_global_entry_put(tt_global);
+       tt_removed_node = batadv_hash_remove(bat_priv->tt.global_hash,
+                                            batadv_compare_tt,
+                                            batadv_choose_tt,
+                                            &tt_global->common);
+       if (!tt_removed_node)
+               return;
+
+       /* drop the reference the hash held on the removed entry */
+       tt_removed_entry = hlist_entry(tt_removed_node,
+                                      struct batadv_tt_global_entry,
+                                      common.hash_entry);
+       batadv_tt_global_entry_put(tt_removed_entry);
 }
 
 /**
@@ -1324,9 +1336,10 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr,
                           unsigned short vid, const char *message,
                           bool roaming)
 {
+       struct batadv_tt_local_entry *tt_removed_entry;
        struct batadv_tt_local_entry *tt_local_entry;
        u16 flags, curr_flags = BATADV_NO_FLAGS;
-       void *tt_entry_exists;
+       struct hlist_node *tt_removed_node;
 
        tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
        if (!tt_local_entry)
@@ -1355,15 +1368,18 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr,
         */
        batadv_tt_local_event(bat_priv, tt_local_entry, BATADV_TT_CLIENT_DEL);
 
-       tt_entry_exists = batadv_hash_remove(bat_priv->tt.local_hash,
+       tt_removed_node = batadv_hash_remove(bat_priv->tt.local_hash,
                                             batadv_compare_tt,
                                             batadv_choose_tt,
                                             &tt_local_entry->common);
-       if (!tt_entry_exists)
+       if (!tt_removed_node)
                goto out;
 
-       /* extra call to free the local tt entry */
-       batadv_tt_local_entry_put(tt_local_entry);
+       /* drop the reference the hash held on the removed entry */
+       tt_removed_entry = hlist_entry(tt_removed_node,
+                                      struct batadv_tt_local_entry,
+                                      common.hash_entry);
+       batadv_tt_local_entry_put(tt_removed_entry);
 
 out:
        if (tt_local_entry)
index 9a580999ca57e3037336bbcdb321dbb4ef0cb196..d892b7c3cc42a05e10053832d7bd4d969f019e46 100644 (file)
@@ -523,12 +523,12 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr,
        struct sock *sk = sock->sk;
        int err = 0;
 
-       BT_DBG("sk %p %pMR", sk, &sa->sco_bdaddr);
-
        if (!addr || addr_len < sizeof(struct sockaddr_sco) ||
            addr->sa_family != AF_BLUETOOTH)
                return -EINVAL;
 
+       BT_DBG("sk %p %pMR", sk, &sa->sco_bdaddr);
+
        lock_sock(sk);
 
        if (sk->sk_state != BT_OPEN) {
index 27b2992a06925240a8118e8182e8c7f048a73bbf..b0ca361742e43bc71d822d79bf064a952a159677 100644 (file)
@@ -1 +1 @@
-obj-y  := test_run.o
+obj-$(CONFIG_BPF_SYSCALL)      := test_run.o
index fab142b796ef28e509b862af3d02523283e1f8ee..2221573dacdb71b9acd118de89d25916f7b2af72 100644 (file)
@@ -123,12 +123,126 @@ static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
        return data;
 }
 
+static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
+{
+       void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
+       void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
+       u32 size = kattr->test.ctx_size_in;
+       void *data;
+       int err;
+
+       if (!data_in && !data_out)
+               return NULL;
+
+       data = kzalloc(max_size, GFP_USER);
+       if (!data)
+               return ERR_PTR(-ENOMEM);
+
+       if (data_in) {
+               err = bpf_check_uarg_tail_zero(data_in, max_size, size);
+               if (err) {
+                       kfree(data);
+                       return ERR_PTR(err);
+               }
+
+               size = min_t(u32, max_size, size);
+               if (copy_from_user(data, data_in, size)) {
+                       kfree(data);
+                       return ERR_PTR(-EFAULT);
+               }
+       }
+       return data;
+}
+
+static int bpf_ctx_finish(const union bpf_attr *kattr,
+                         union bpf_attr __user *uattr, const void *data,
+                         u32 size)
+{
+       void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
+       int err = -EFAULT;
+       u32 copy_size = size;
+
+       if (!data || !data_out)
+               return 0;
+
+       if (copy_size > kattr->test.ctx_size_out) {
+               copy_size = kattr->test.ctx_size_out;
+               err = -ENOSPC;
+       }
+
+       if (copy_to_user(data_out, data, copy_size))
+               goto out;
+       if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
+               goto out;
+       if (err != -ENOSPC)
+               err = 0;
+out:
+       return err;
+}
+
+/**
+ * range_is_zero - test whether buffer is initialized
+ * @buf: buffer to check
+ * @from: check from this position
+ * @to: check up until (excluding) this position
+ *
+ * This function returns true if there are no non-zero bytes
+ * in the buffer in the range [from, to).
+ */
+static inline bool range_is_zero(void *buf, size_t from, size_t to)
+{
+       return !memchr_inv((u8 *)buf + from, 0, to - from);
+}
+
+static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
+{
+       struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;
+
+       if (!__skb)
+               return 0;
+
+       /* make sure the fields we don't use are zeroed */
+       if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, priority)))
+               return -EINVAL;
+
+       /* priority is allowed */
+
+       if (!range_is_zero(__skb, offsetof(struct __sk_buff, priority) +
+                          FIELD_SIZEOF(struct __sk_buff, priority),
+                          offsetof(struct __sk_buff, cb)))
+               return -EINVAL;
+
+       /* cb is allowed */
+
+       if (!range_is_zero(__skb, offsetof(struct __sk_buff, cb) +
+                          FIELD_SIZEOF(struct __sk_buff, cb),
+                          sizeof(struct __sk_buff)))
+               return -EINVAL;
+
+       skb->priority = __skb->priority;
+       memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);
+
+       return 0;
+}
+
+static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
+{
+       struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;
+
+       if (!__skb)
+               return;
+
+       __skb->priority = skb->priority;
+       memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
+}
+
 int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
                          union bpf_attr __user *uattr)
 {
        bool is_l2 = false, is_direct_pkt_access = false;
        u32 size = kattr->test.data_size_in;
        u32 repeat = kattr->test.repeat;
+       struct __sk_buff *ctx = NULL;
        u32 retval, duration;
        int hh_len = ETH_HLEN;
        struct sk_buff *skb;
@@ -141,6 +255,12 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
        if (IS_ERR(data))
                return PTR_ERR(data);
 
+       ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
+       if (IS_ERR(ctx)) {
+               kfree(data);
+               return PTR_ERR(ctx);
+       }
+
        switch (prog->type) {
        case BPF_PROG_TYPE_SCHED_CLS:
        case BPF_PROG_TYPE_SCHED_ACT:
@@ -158,6 +278,7 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
        sk = kzalloc(sizeof(struct sock), GFP_USER);
        if (!sk) {
                kfree(data);
+               kfree(ctx);
                return -ENOMEM;
        }
        sock_net_set(sk, current->nsproxy->net_ns);
@@ -166,6 +287,7 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
        skb = build_skb(data, 0);
        if (!skb) {
                kfree(data);
+               kfree(ctx);
                kfree(sk);
                return -ENOMEM;
        }
@@ -180,32 +302,37 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
                __skb_push(skb, hh_len);
        if (is_direct_pkt_access)
                bpf_compute_data_pointers(skb);
+       ret = convert___skb_to_skb(skb, ctx);
+       if (ret)
+               goto out;
        ret = bpf_test_run(prog, skb, repeat, &retval, &duration);
-       if (ret) {
-               kfree_skb(skb);
-               kfree(sk);
-               return ret;
-       }
+       if (ret)
+               goto out;
        if (!is_l2) {
                if (skb_headroom(skb) < hh_len) {
                        int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));
 
                        if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
-                               kfree_skb(skb);
-                               kfree(sk);
-                               return -ENOMEM;
+                               ret = -ENOMEM;
+                               goto out;
                        }
                }
                memset(__skb_push(skb, hh_len), 0, hh_len);
        }
+       convert_skb_to___skb(skb, ctx);
 
        size = skb->len;
        /* bpf program can never convert linear skb to non-linear */
        if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
                size = skb_headlen(skb);
        ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
+       if (!ret)
+               ret = bpf_ctx_finish(kattr, uattr, ctx,
+                                    sizeof(struct __sk_buff));
+out:
        kfree_skb(skb);
        kfree(sk);
+       kfree(ctx);
        return ret;
 }
 
@@ -220,6 +347,9 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
        void *data;
        int ret;
 
+       if (kattr->test.ctx_in || kattr->test.ctx_out)
+               return -EINVAL;
+
        data = bpf_test_init(kattr, size, XDP_PACKET_HEADROOM + NET_IP_ALIGN, 0);
        if (IS_ERR(data))
                return PTR_ERR(data);
@@ -263,6 +393,9 @@ int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
        if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
                return -EINVAL;
 
+       if (kattr->test.ctx_in || kattr->test.ctx_out)
+               return -EINVAL;
+
        data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
                             SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
        if (IS_ERR(data))
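
bpf_ctx_init() and bpf_ctx_finish() give BPF_PROG_TEST_RUN an optional context blob next to the packet data: userspace may pass in a struct __sk_buff in which only priority and cb are allowed to be nonzero, and read the possibly modified context back out. A hedged sketch of driving this with a raw bpf(2) call, assuming UAPI headers that already carry the new ctx_* fields; error handling is trimmed:

#include <linux/bpf.h>
#include <sys/syscall.h>
#include <unistd.h>

static int test_run_with_ctx(int prog_fd, void *pkt, __u32 pkt_len)
{
    struct __sk_buff ctx_in = {}, ctx_out = {};
    union bpf_attr attr = {};

    ctx_in.priority = 7;               /* one of the accepted fields */

    attr.test.prog_fd      = prog_fd;
    attr.test.data_in      = (__u64)(unsigned long)pkt;
    attr.test.data_size_in = pkt_len;
    attr.test.ctx_in       = (__u64)(unsigned long)&ctx_in;
    attr.test.ctx_size_in  = sizeof(ctx_in);
    attr.test.ctx_out      = (__u64)(unsigned long)&ctx_out;
    attr.test.ctx_size_out = sizeof(ctx_out);

    /* On return, ctx_out.priority reflects any change the program made. */
    return syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr));
}
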
index 724b474ade54ce1c18c7c5c0d66db50e4509f934..15116752365a601ab3fe424cfdcaf0167bade8e8 100644 (file)
@@ -131,7 +131,7 @@ void br_do_proxy_suppress_arp(struct sk_buff *skb, struct net_bridge *br,
        u8 *arpptr, *sha;
        __be32 sip, tip;
 
-       BR_INPUT_SKB_CB(skb)->proxyarp_replied = false;
+       BR_INPUT_SKB_CB(skb)->proxyarp_replied = 0;
 
        if ((dev->flags & IFF_NOARP) ||
            !pskb_may_pull(skb, arp_hdr_len(dev)))
@@ -161,7 +161,7 @@ void br_do_proxy_suppress_arp(struct sk_buff *skb, struct net_bridge *br,
                        return;
                if (ipv4_is_zeronet(sip) || sip == tip) {
                        /* prevent flooding to neigh suppress ports */
-                       BR_INPUT_SKB_CB(skb)->proxyarp_replied = true;
+                       BR_INPUT_SKB_CB(skb)->proxyarp_replied = 1;
                        return;
                }
        }
@@ -181,7 +181,7 @@ void br_do_proxy_suppress_arp(struct sk_buff *skb, struct net_bridge *br,
                /* its our local ip, so don't proxy reply
                 * and don't forward to neigh suppress ports
                 */
-               BR_INPUT_SKB_CB(skb)->proxyarp_replied = true;
+               BR_INPUT_SKB_CB(skb)->proxyarp_replied = 1;
                return;
        }
 
@@ -217,7 +217,7 @@ void br_do_proxy_suppress_arp(struct sk_buff *skb, struct net_bridge *br,
                         */
                        if (replied ||
                            br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED))
-                               BR_INPUT_SKB_CB(skb)->proxyarp_replied = true;
+                               BR_INPUT_SKB_CB(skb)->proxyarp_replied = 1;
                }
 
                neigh_release(n);
@@ -393,7 +393,7 @@ void br_do_suppress_nd(struct sk_buff *skb, struct net_bridge *br,
        struct ipv6hdr *iphdr;
        struct neighbour *n;
 
-       BR_INPUT_SKB_CB(skb)->proxyarp_replied = false;
+       BR_INPUT_SKB_CB(skb)->proxyarp_replied = 0;
 
        if (p && (p->flags & BR_NEIGH_SUPPRESS))
                return;
@@ -401,7 +401,7 @@ void br_do_suppress_nd(struct sk_buff *skb, struct net_bridge *br,
        if (msg->icmph.icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT &&
            !msg->icmph.icmp6_solicited) {
                /* prevent flooding to neigh suppress ports */
-               BR_INPUT_SKB_CB(skb)->proxyarp_replied = true;
+               BR_INPUT_SKB_CB(skb)->proxyarp_replied = 1;
                return;
        }
 
@@ -414,7 +414,7 @@ void br_do_suppress_nd(struct sk_buff *skb, struct net_bridge *br,
 
        if (ipv6_addr_any(saddr) || !ipv6_addr_cmp(saddr, daddr)) {
                /* prevent flooding to neigh suppress ports */
-               BR_INPUT_SKB_CB(skb)->proxyarp_replied = true;
+               BR_INPUT_SKB_CB(skb)->proxyarp_replied = 1;
                return;
        }
 
@@ -432,7 +432,7 @@ void br_do_suppress_nd(struct sk_buff *skb, struct net_bridge *br,
                /* its our own ip, so don't proxy reply
                 * and don't forward to arp suppress ports
                 */
-               BR_INPUT_SKB_CB(skb)->proxyarp_replied = true;
+               BR_INPUT_SKB_CB(skb)->proxyarp_replied = 1;
                return;
        }
 
@@ -465,7 +465,7 @@ void br_do_suppress_nd(struct sk_buff *skb, struct net_bridge *br,
                         */
                        if (replied ||
                            br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED))
-                               BR_INPUT_SKB_CB(skb)->proxyarp_replied = true;
+                               BR_INPUT_SKB_CB(skb)->proxyarp_replied = 1;
                }
                neigh_release(n);
        }
index 00573cc46c98b4358679b178d772a28501bd3bd5..b1c91f66d79c4e3e1beca4d091a7db7b5ffe3b34 100644 (file)
@@ -33,7 +33,6 @@ static const struct rhashtable_params br_fdb_rht_params = {
        .key_offset = offsetof(struct net_bridge_fdb_entry, key),
        .key_len = sizeof(struct net_bridge_fdb_key),
        .automatic_shrinking = true,
-       .locks_mul = 1,
 };
 
 static struct kmem_cache *br_fdb_cache __read_mostly;
index 48ddc60b4fbdece571592bc86689860707ddf6ba..82225b8b54f5989a0441a740515e71ea25614bd4 100644 (file)
@@ -173,6 +173,7 @@ static struct net_bridge_port *maybe_deliver(
        struct net_bridge_port *prev, struct net_bridge_port *p,
        struct sk_buff *skb, bool local_orig)
 {
+       u8 igmp_type = br_multicast_igmp_type(skb);
        int err;
 
        if (!should_deliver(p, skb))
@@ -184,8 +185,9 @@ static struct net_bridge_port *maybe_deliver(
        err = deliver_clone(prev, skb, local_orig);
        if (err)
                return ERR_PTR(err);
-
 out:
+       br_multicast_count(p->br, p, skb, igmp_type, BR_MCAST_DIR_TX);
+
        return p;
 }
 
@@ -193,7 +195,6 @@ static struct net_bridge_port *maybe_deliver(
 void br_flood(struct net_bridge *br, struct sk_buff *skb,
              enum br_pkt_type pkt_type, bool local_rcv, bool local_orig)
 {
-       u8 igmp_type = br_multicast_igmp_type(skb);
        struct net_bridge_port *prev = NULL;
        struct net_bridge_port *p;
 
@@ -226,9 +227,6 @@ void br_flood(struct net_bridge *br, struct sk_buff *skb,
                prev = maybe_deliver(prev, p, skb, local_orig);
                if (IS_ERR(prev))
                        goto out;
-               if (prev == p)
-                       br_multicast_count(p->br, p, skb, igmp_type,
-                                          BR_MCAST_DIR_TX);
        }
 
        if (!prev)
@@ -277,7 +275,6 @@ void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
                        bool local_rcv, bool local_orig)
 {
        struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
-       u8 igmp_type = br_multicast_igmp_type(skb);
        struct net_bridge *br = netdev_priv(dev);
        struct net_bridge_port *prev = NULL;
        struct net_bridge_port_group *p;
@@ -304,13 +301,9 @@ void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
                }
 
                prev = maybe_deliver(prev, port, skb, local_orig);
-delivered:
                if (IS_ERR(prev))
                        goto out;
-               if (prev == port)
-                       br_multicast_count(port->br, port, skb, igmp_type,
-                                          BR_MCAST_DIR_TX);
-
+delivered:
                if ((unsigned long)lport >= (unsigned long)port)
                        p = rcu_dereference(p->next);
                if ((unsigned long)rport >= (unsigned long)port)
index 5ea7e56119c13876a8726ffee2e9dc43ce73406f..014af7efef251871fdbf2fbca2a60714b1a3bc6f 100644 (file)
@@ -16,6 +16,9 @@
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/netfilter_bridge.h>
+#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
+#include <net/netfilter/nf_queue.h>
+#endif
 #include <linux/neighbour.h>
 #include <net/arp.h>
 #include <linux/export.h>
 #include "br_private.h"
 #include "br_private_tunnel.h"
 
-/* Hook for brouter */
-br_should_route_hook_t __rcu *br_should_route_hook __read_mostly;
-EXPORT_SYMBOL(br_should_route_hook);
-
 static int
 br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
@@ -197,13 +196,63 @@ static void __br_handle_local_finish(struct sk_buff *skb)
 /* note: already called with rcu_read_lock */
 static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
-       struct net_bridge_port *p = br_port_get_rcu(skb->dev);
-
        __br_handle_local_finish(skb);
 
-       BR_INPUT_SKB_CB(skb)->brdev = p->br->dev;
-       br_pass_frame_up(skb);
-       return 0;
+       /* return 1 to signal the okfn() was called so it's ok to use the skb */
+       return 1;
+}
+
+static int nf_hook_bridge_pre(struct sk_buff *skb, struct sk_buff **pskb)
+{
+#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
+       struct nf_hook_entries *e = NULL;
+       struct nf_hook_state state;
+       unsigned int verdict, i;
+       struct net *net;
+       int ret;
+
+       net = dev_net(skb->dev);
+#ifdef HAVE_JUMP_LABEL
+       if (!static_key_false(&nf_hooks_needed[NFPROTO_BRIDGE][NF_BR_PRE_ROUTING]))
+               goto frame_finish;
+#endif
+
+       e = rcu_dereference(net->nf.hooks_bridge[NF_BR_PRE_ROUTING]);
+       if (!e)
+               goto frame_finish;
+
+       nf_hook_state_init(&state, NF_BR_PRE_ROUTING,
+                          NFPROTO_BRIDGE, skb->dev, NULL, NULL,
+                          net, br_handle_frame_finish);
+
+       for (i = 0; i < e->num_hook_entries; i++) {
+               verdict = nf_hook_entry_hookfn(&e->hooks[i], skb, &state);
+               switch (verdict & NF_VERDICT_MASK) {
+               case NF_ACCEPT:
+                       if (BR_INPUT_SKB_CB(skb)->br_netfilter_broute) {
+                               *pskb = skb;
+                               return RX_HANDLER_PASS;
+                       }
+                       break;
+               case NF_DROP:
+                       kfree_skb(skb);
+                       return RX_HANDLER_CONSUMED;
+               case NF_QUEUE:
+                       ret = nf_queue(skb, &state, e, i, verdict);
+                       if (ret == 1)
+                               continue;
+                       return RX_HANDLER_CONSUMED;
+               default: /* STOLEN */
+                       return RX_HANDLER_CONSUMED;
+               }
+       }
+frame_finish:
+       net = dev_net(skb->dev);
+       br_handle_frame_finish(net, NULL, skb);
+#else
+       br_handle_frame_finish(dev_net(skb->dev), NULL, skb);
+#endif
+       return RX_HANDLER_CONSUMED;
 }
 
 /*
@@ -215,7 +264,6 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
        struct net_bridge_port *p;
        struct sk_buff *skb = *pskb;
        const unsigned char *dest = eth_hdr(skb)->h_dest;
-       br_should_route_hook_t *rhook;
 
        if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
                return RX_HANDLER_PASS;
@@ -227,6 +275,8 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
        if (!skb)
                return RX_HANDLER_CONSUMED;
 
+       memset(skb->cb, 0, sizeof(struct br_input_skb_cb));
+
        p = br_port_get_rcu(skb->dev);
        if (p->flags & BR_VLAN_TUNNEL) {
                if (br_handle_ingress_vlan_tunnel(skb, p,
@@ -280,32 +330,28 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
                                goto forward;
                }
 
-               /* Deliver packet to local host only */
-               NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, dev_net(skb->dev),
-                       NULL, skb, skb->dev, NULL, br_handle_local_finish);
-               return RX_HANDLER_CONSUMED;
+               /* The else clause is hit when nf_hook():
+                *   - returns < 0 (drop/error), or
+                *   - returns 0 (stolen/nf_queue).
+                * The okfn() therefore returns 1 to signal that the skb
+                * is ok to pass up.
+                */
+               if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN,
+                           dev_net(skb->dev), NULL, skb, skb->dev, NULL,
+                           br_handle_local_finish) == 1) {
+                       return RX_HANDLER_PASS;
+               } else {
+                       return RX_HANDLER_CONSUMED;
+               }
        }
 
 forward:
        switch (p->state) {
        case BR_STATE_FORWARDING:
-               rhook = rcu_dereference(br_should_route_hook);
-               if (rhook) {
-                       if ((*rhook)(skb)) {
-                               *pskb = skb;
-                               return RX_HANDLER_PASS;
-                       }
-                       dest = eth_hdr(skb)->h_dest;
-               }
-               /* fall through */
        case BR_STATE_LEARNING:
                if (ether_addr_equal(p->br->dev->dev_addr, dest))
                        skb->pkt_type = PACKET_HOST;
 
-               NF_HOOK(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING,
-                       dev_net(skb->dev), NULL, skb, skb->dev, NULL,
-                       br_handle_frame_finish);
-               break;
+               return nf_hook_bridge_pre(skb, pskb);
        default:
 drop:
                kfree_skb(skb);
index f5343dfac2826ea39d8172d4d8459c438b71044b..c2a30f79a9d013de41e90e969324cb19075dfbfe 100644 (file)
@@ -44,7 +44,6 @@ static const struct rhashtable_params br_mdb_rht_params = {
        .key_offset = offsetof(struct net_bridge_mdb_entry, addr),
        .key_len = sizeof(struct br_ip),
        .automatic_shrinking = true,
-       .locks_mul = 1,
 };
 
 static void br_multicast_start_querier(struct net_bridge *br,
@@ -65,23 +64,6 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
                                         __u16 vid, const unsigned char *src);
 #endif
 
-static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
-{
-       if (a->proto != b->proto)
-               return 0;
-       if (a->vid != b->vid)
-               return 0;
-       switch (a->proto) {
-       case htons(ETH_P_IP):
-               return a->u.ip4 == b->u.ip4;
-#if IS_ENABLED(CONFIG_IPV6)
-       case htons(ETH_P_IPV6):
-               return ipv6_addr_equal(&a->u.ip6, &b->u.ip6);
-#endif
-       }
-       return 0;
-}
-
 static struct net_bridge_mdb_entry *br_mdb_ip_get_rcu(struct net_bridge *br,
                                                      struct br_ip *dst)
 {
@@ -601,6 +583,7 @@ static int br_ip4_multicast_add_group(struct net_bridge *br,
        if (ipv4_is_local_multicast(group))
                return 0;
 
+       memset(&br_group, 0, sizeof(br_group));
        br_group.u.ip4 = group;
        br_group.proto = htons(ETH_P_IP);
        br_group.vid = vid;
@@ -1497,6 +1480,7 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br,
 
        own_query = port ? &port->ip4_own_query : &br->ip4_own_query;
 
+       memset(&br_group, 0, sizeof(br_group));
        br_group.u.ip4 = group;
        br_group.proto = htons(ETH_P_IP);
        br_group.vid = vid;
@@ -1520,6 +1504,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
 
        own_query = port ? &port->ip6_own_query : &br->ip6_own_query;
 
+       memset(&br_group, 0, sizeof(br_group));
        br_group.u.ip6 = *group;
        br_group.proto = htons(ETH_P_IPV6);
        br_group.vid = vid;
@@ -2028,7 +2013,8 @@ static void br_multicast_start_querier(struct net_bridge *br,
 
        __br_multicast_open(br, query);
 
-       list_for_each_entry(port, &br->port_list, list) {
+       rcu_read_lock();
+       list_for_each_entry_rcu(port, &br->port_list, list) {
                if (port->state == BR_STATE_DISABLED ||
                    port->state == BR_STATE_BLOCKING)
                        continue;
@@ -2040,6 +2026,7 @@ static void br_multicast_start_querier(struct net_bridge *br,
                        br_multicast_enable(&port->ip6_own_query);
 #endif
        }
+       rcu_read_unlock();
 }
 
 int br_multicast_toggle(struct net_bridge *br, unsigned long val)
index 4f9f59eba8b4552c63460be52c81ce3376221782..8dfcc2d285d8ccd640bce0b084f0b2418f62204d 100644 (file)
@@ -1441,7 +1441,7 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
            nla_put_u8(skb, IFLA_BR_VLAN_STATS_ENABLED,
                       br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) ||
            nla_put_u8(skb, IFLA_BR_VLAN_STATS_PER_PORT,
-                      br_opt_get(br, IFLA_BR_VLAN_STATS_PER_PORT)))
+                      br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)))
                return -EMSGSIZE;
 #endif
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
index 7946aa3b6e099d529773023d119646e1b66c097a..4bea2f11da9bd024de9ce1ec8686ff36eb671821 100644 (file)
@@ -425,15 +425,16 @@ struct br_input_skb_cb {
        struct net_device *brdev;
 
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
-       int igmp;
-       int mrouters_only;
+       u8 igmp;
+       u8 mrouters_only:1;
 #endif
-
-       bool proxyarp_replied;
-       bool src_port_isolated;
-
+       u8 proxyarp_replied:1;
+       u8 src_port_isolated:1;
 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
-       bool vlan_filtered;
+       u8 vlan_filtered:1;
+#endif
+#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
+       u8 br_netfilter_broute:1;
 #endif
 
 #ifdef CONFIG_NET_SWITCHDEV
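The br_input_skb_cb rework is a size optimization with a purpose: skb->cb is capped at 48 bytes, and collapsing the int/bool members into u8 bitfields makes room for the new br_netfilter_broute bit used by the ebtables change below. Roughly (hypothetical layout; exact padding is ABI-dependent):

	struct demo_cb_old {
		int	igmp;			/* 4 bytes */
		int	mrouters_only;		/* 4 bytes */
		bool	proxyarp_replied;	/* 1 byte  */
		bool	src_port_isolated;	/* 1 byte  */
	};					/* 12 bytes after padding */

	struct demo_cb_new {
		u8	igmp;
		u8	mrouters_only:1,
			proxyarp_replied:1,
			src_port_isolated:1,
			vlan_filtered:1,
			br_netfilter_broute:1;
	};					/* 2 bytes */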
index 808e2b914015fa4dd69e7736054c20e3c9e1aaba..8d65ae5210e09b7e732feaf2d60d5014de33a7e5 100644 (file)
@@ -117,7 +117,8 @@ void br_stp_disable_port(struct net_bridge_port *p)
        del_timer(&p->forward_delay_timer);
        del_timer(&p->hold_timer);
 
-       br_fdb_delete_by_port(br, p, 0, 0);
+       if (!rcu_access_pointer(p->backup_port))
+               br_fdb_delete_by_port(br, p, 0, 0);
        br_multicast_disable_port(p);
 
        br_configuration_update(br);
index 96abf8feb9dc6c2e01a39fa1f9827fc4da1bb1b7..0a02822b56675884998fb0ca328be0666cc73139 100644 (file)
@@ -21,7 +21,6 @@ static const struct rhashtable_params br_vlan_rht_params = {
        .key_offset = offsetof(struct net_bridge_vlan, vid),
        .key_len = sizeof(u16),
        .nelem_hint = 3,
-       .locks_mul = 1,
        .max_size = VLAN_N_VID,
        .obj_cmpfn = br_vlan_cmp,
        .automatic_shrinking = true,
index 6d2c4eed2dc892b15a99f4741dc8639b25a63e32..75815186366976a95329d7f5a93959b7eb9f9632 100644 (file)
@@ -34,7 +34,6 @@ static const struct rhashtable_params br_vlan_tunnel_rht_params = {
        .key_offset = offsetof(struct net_bridge_vlan, tinfo.tunnel_id),
        .key_len = sizeof(__be64),
        .nelem_hint = 3,
-       .locks_mul = 1,
        .obj_cmpfn = br_vlan_tunid_cmp,
        .automatic_shrinking = true,
 };
index 276b60262981c95a9fccd508e8d8123212d535de..ec2652a459da87664ec6847c2fbad6516e3eafac 100644 (file)
@@ -15,6 +15,8 @@
 #include <linux/module.h>
 #include <linux/if_bridge.h>
 
+#include "../br_private.h"
+
 /* EBT_ACCEPT means the frame will be bridged
  * EBT_DROP means the frame will be routed
  */
@@ -48,30 +50,63 @@ static const struct ebt_table broute_table = {
        .me             = THIS_MODULE,
 };
 
-static int ebt_broute(struct sk_buff *skb)
+static unsigned int ebt_broute(void *priv, struct sk_buff *skb,
+                              const struct nf_hook_state *s)
 {
+       struct net_bridge_port *p = br_port_get_rcu(skb->dev);
        struct nf_hook_state state;
+       unsigned char *dest;
        int ret;
 
+       if (!p || p->state != BR_STATE_FORWARDING)
+               return NF_ACCEPT;
+
        nf_hook_state_init(&state, NF_BR_BROUTING,
-                          NFPROTO_BRIDGE, skb->dev, NULL, NULL,
-                          dev_net(skb->dev), NULL);
+                          NFPROTO_BRIDGE, s->in, NULL, NULL,
+                          s->net, NULL);
 
        ret = ebt_do_table(skb, &state, state.net->xt.broute_table);
-       if (ret == NF_DROP)
-               return 1; /* route it */
-       return 0; /* bridge it */
+
+       if (ret != NF_DROP)
+               return ret;
+
+       /* DROP in ebtables -t broute means that the
+        * skb should be routed, not bridged.
+        * This is awkward, but can't be changed for compatibility
+        * reasons.
+        *
+        * We map DROP to ACCEPT and set the ->br_netfilter_broute flag.
+        */
+       BR_INPUT_SKB_CB(skb)->br_netfilter_broute = 1;
+
+       /* undo PACKET_HOST mangling done in br_input in case the dst
+        * address matches the logical bridge but not the port.
+        */
+       dest = eth_hdr(skb)->h_dest;
+       if (skb->pkt_type == PACKET_HOST &&
+           !ether_addr_equal(skb->dev->dev_addr, dest) &&
+            ether_addr_equal(p->br->dev->dev_addr, dest))
+               skb->pkt_type = PACKET_OTHERHOST;
+
+       return NF_ACCEPT;
 }
 
+static const struct nf_hook_ops ebt_ops_broute = {
+       .hook           = ebt_broute,
+       .pf             = NFPROTO_BRIDGE,
+       .hooknum        = NF_BR_PRE_ROUTING,
+       .priority       = NF_BR_PRI_FIRST,
+};
+
 static int __net_init broute_net_init(struct net *net)
 {
-       return ebt_register_table(net, &broute_table, NULL,
+       return ebt_register_table(net, &broute_table, &ebt_ops_broute,
                                  &net->xt.broute_table);
 }
 
 static void __net_exit broute_net_exit(struct net *net)
 {
-       ebt_unregister_table(net, net->xt.broute_table, NULL);
+       ebt_unregister_table(net, net->xt.broute_table, &ebt_ops_broute);
 }
 
 static struct pernet_operations broute_net_ops = {
@@ -81,21 +116,11 @@ static struct pernet_operations broute_net_ops = {
 
 static int __init ebtable_broute_init(void)
 {
-       int ret;
-
-       ret = register_pernet_subsys(&broute_net_ops);
-       if (ret < 0)
-               return ret;
-       /* see br_input.c */
-       RCU_INIT_POINTER(br_should_route_hook,
-                          (br_should_route_hook_t *)ebt_broute);
-       return 0;
+       return register_pernet_subsys(&broute_net_ops);
 }
 
 static void __exit ebtable_broute_fini(void)
 {
-       RCU_INIT_POINTER(br_should_route_hook, NULL);
-       synchronize_net();
        unregister_pernet_subsys(&broute_net_ops);
 }
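With the global br_should_route_hook gone, broute is an ordinary NF_BR_PRE_ROUTING hook, and the "route it" verdict travels in the skb control block instead of a magic return value. A simplified, hypothetical sketch of how the bridge input path can consume the flag (the real check lives in br_input.c):

	static rx_handler_result_t demo_bridge_rx(struct sk_buff *skb)
	{
		/* NF_BR_PRE_ROUTING hooks, including ebt_broute(), ran already */
		if (BR_INPUT_SKB_CB(skb)->br_netfilter_broute)
			return RX_HANDLER_PASS;	/* "route it": hand skb to the IP stack */

		/* otherwise continue down the normal bridging path */
		return RX_HANDLER_CONSUMED;
	}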
 
index eb15891f8b9ff18842b7d43e96c75733ef7aaa99..383f0328ff6871bfadcc84f0bea469c70c908dc6 100644 (file)
@@ -1221,10 +1221,6 @@ int ebt_register_table(struct net *net, const struct ebt_table *input_table,
        mutex_unlock(&ebt_mutex);
 
        WRITE_ONCE(*res, table);
-
-       if (!ops)
-               return 0;
-
        ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks));
        if (ret) {
                __ebt_unregister_table(net, table);
@@ -1248,8 +1244,7 @@ int ebt_register_table(struct net *net, const struct ebt_table *input_table,
 void ebt_unregister_table(struct net *net, struct ebt_table *table,
                          const struct nf_hook_ops *ops)
 {
-       if (ops)
-               nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
+       nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
        __ebt_unregister_table(net, table);
 }
 
index 711d7156efd8bc94b449d0e8066eedd4fe0d5747..6c6e01963aaccd256c887d9c2aa68107de469f08 100644 (file)
@@ -186,15 +186,19 @@ static int transmit(struct cflayer *layer, struct cfpkt *pkt)
                goto noxoff;
 
        if (likely(!netif_queue_stopped(caifd->netdev))) {
+               struct Qdisc *sch;
+
                /* If we run with a TX queue, check if the queue is too long */
                txq = netdev_get_tx_queue(skb->dev, 0);
-               qlen = qdisc_qlen(rcu_dereference_bh(txq->qdisc));
-
-               if (likely(qlen == 0))
+               sch = rcu_dereference_bh(txq->qdisc);
+               if (likely(qdisc_is_empty(sch)))
                        goto noxoff;
 
+               /* An explicit qdisc len value can be checked only for
+                * !NOLOCK qdiscs; always set flow off otherwise.
+                */
                high = (caifd->netdev->tx_queue_len * q_high) / 100;
-               if (likely(qlen < high))
+               if (!(sch->flags & TCQ_F_NOLOCK) && likely(sch->q.qlen < high))
                        goto noxoff;
        }
 
index 7e71b0df1fbc9185b192a43427c7cb281b778ca1..3083988ce729dbe01771e9433b7de72e484394f9 100644 (file)
@@ -840,6 +840,7 @@ static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor,
                                        size_t bytes)
 {
        struct ceph_bio_iter *it = &cursor->bio_iter;
+       struct page *page = bio_iter_page(it->bio, it->iter);
 
        BUG_ON(bytes > cursor->resid);
        BUG_ON(bytes > bio_iter_len(it->bio, it->iter));
@@ -851,7 +852,8 @@ static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor,
                return false;   /* no more data */
        }
 
-       if (!bytes || (it->iter.bi_size && it->iter.bi_bvec_done))
+       if (!bytes || (it->iter.bi_size && it->iter.bi_bvec_done &&
+                      page == bio_iter_page(it->bio, it->iter)))
                return false;   /* more bytes to process in this segment */
 
        if (!it->iter.bi_size) {
@@ -899,6 +901,7 @@ static bool ceph_msg_data_bvecs_advance(struct ceph_msg_data_cursor *cursor,
                                        size_t bytes)
 {
        struct bio_vec *bvecs = cursor->data->bvec_pos.bvecs;
+       struct page *page = bvec_iter_page(bvecs, cursor->bvec_iter);
 
        BUG_ON(bytes > cursor->resid);
        BUG_ON(bytes > bvec_iter_len(bvecs, cursor->bvec_iter));
@@ -910,7 +913,8 @@ static bool ceph_msg_data_bvecs_advance(struct ceph_msg_data_cursor *cursor,
                return false;   /* no more data */
        }
 
-       if (!bytes || cursor->bvec_iter.bi_bvec_done)
+       if (!bytes || (cursor->bvec_iter.bi_bvec_done &&
+                      page == bvec_iter_page(bvecs, cursor->bvec_iter)))
                return false;   /* more bytes to process in this segment */
 
        BUG_ON(cursor->last_piece);
index 0dafec5cada0fbde8da5546277e8af787f68e469..45a162ef5e02570189b006492ad8cd65c33c1990 100644 (file)
@@ -167,7 +167,7 @@ struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
                                          unsigned int flags,
                                          void (*destructor)(struct sock *sk,
                                                           struct sk_buff *skb),
-                                         int *peeked, int *off, int *err,
+                                         int *off, int *err,
                                          struct sk_buff **last)
 {
        bool peek_at_off = false;
@@ -194,7 +194,6 @@ struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
                                        return NULL;
                                }
                        }
-                       *peeked = 1;
                        refcount_inc(&skb->users);
                } else {
                        __skb_unlink(skb, queue);
@@ -212,7 +211,6 @@ struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
  *     @sk: socket
  *     @flags: MSG\_ flags
  *     @destructor: invoked under the receive lock on successful dequeue
- *     @peeked: returns non-zero if this packet has been seen before
  *     @off: an offset in bytes to peek skb from. Returns an offset
  *           within an skb where data actually starts
  *     @err: error code returned
@@ -246,7 +244,7 @@ struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
 struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned int flags,
                                        void (*destructor)(struct sock *sk,
                                                           struct sk_buff *skb),
-                                       int *peeked, int *off, int *err,
+                                       int *off, int *err,
                                        struct sk_buff **last)
 {
        struct sk_buff_head *queue = &sk->sk_receive_queue;
@@ -260,7 +258,6 @@ struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned int flags,
        if (error)
                goto no_packet;
 
-       *peeked = 0;
        do {
                /* Again only user level code calls this function, so nothing
                 * interrupt level will suddenly eat the receive_queue.
@@ -270,7 +267,7 @@ struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned int flags,
                 */
                spin_lock_irqsave(&queue->lock, cpu_flags);
                skb = __skb_try_recv_from_queue(sk, queue, flags, destructor,
-                                               peeked, off, &error, last);
+                                               off, &error, last);
                spin_unlock_irqrestore(&queue->lock, cpu_flags);
                if (error)
                        goto no_packet;
@@ -281,7 +278,7 @@ struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned int flags,
                        break;
 
                sk_busy_loop(sk, flags & MSG_DONTWAIT);
-       } while (!skb_queue_empty(&sk->sk_receive_queue));
+       } while (sk->sk_receive_queue.prev != *last);
 
        error = -EAGAIN;
 
@@ -294,7 +291,7 @@ EXPORT_SYMBOL(__skb_try_recv_datagram);
 struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
                                    void (*destructor)(struct sock *sk,
                                                       struct sk_buff *skb),
-                                   int *peeked, int *off, int *err)
+                                   int *off, int *err)
 {
        struct sk_buff *skb, *last;
        long timeo;
@@ -302,8 +299,8 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
        timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 
        do {
-               skb = __skb_try_recv_datagram(sk, flags, destructor, peeked,
-                                             off, err, &last);
+               skb = __skb_try_recv_datagram(sk, flags, destructor, off, err,
+                                             &last);
                if (skb)
                        return skb;
 
@@ -319,10 +316,10 @@ EXPORT_SYMBOL(__skb_recv_datagram);
 struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags,
                                  int noblock, int *err)
 {
-       int peeked, off = 0;
+       int off = 0;
 
        return __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
-                                  NULL, &peeked, &off, err);
+                                  NULL, &off, err);
 }
 EXPORT_SYMBOL(skb_recv_datagram);
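After this cleanup, recvmsg paths no longer thread a "peeked" out-parameter through the datagram helpers; MSG_PEEK bookkeeping rides on the skb itself (skb->peeked). A trimmed, hypothetical caller against the new signature:

	static int demo_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
				int flags)
	{
		struct sk_buff *skb;
		size_t copied;
		int off = 0, err;

		skb = __skb_recv_datagram(sk, flags, NULL, &off, &err);
		if (!skb)
			return err;

		/* a repeated peek is now visible as skb->peeked */
		copied = min_t(size_t, len, skb->len - off);
		err = skb_copy_datagram_msg(skb, off, msg, copied);
		skb_free_datagram(sk, skb);
		return err ? err : copied;
	}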
 
index 9823b7713f79dec2f8927b5ebb380d3dddd8fbb4..22f2640f559a31cfd89dce69fafa20f7d77b0413 100644 (file)
 #include <trace/events/napi.h>
 #include <trace/events/net.h>
 #include <trace/events/skb.h>
-#include <linux/pci.h>
 #include <linux/inetdevice.h>
 #include <linux/cpu_rmap.h>
 #include <linux/static_key.h>
@@ -1185,7 +1184,21 @@ int dev_change_name(struct net_device *dev, const char *newname)
        BUG_ON(!dev_net(dev));
 
        net = dev_net(dev);
-       if (dev->flags & IFF_UP)
+
+       /* Some auto-enslaved devices, e.g. failover slaves, are
+        * special: userspace might rename the device after the
+        * kernel initiated auto-enslavement, i.e. while the
+        * interface is already up and running. Allow a live name
+        * change for these slave devices.
+        *
+        * Typically, users of these auto-enslaving devices
+        * don't actually care about the slave name change, as
+        * they are supposed to operate on the master interface
+        * directly.
+        */
+       if (dev->flags & IFF_UP &&
+           likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK)))
                return -EBUSY;
 
        write_seqcount_begin(&devnet_rename_seq);
@@ -3566,9 +3579,6 @@ static void skb_update_prio(struct sk_buff *skb)
 #define skb_update_prio(skb)
 #endif
 
-DEFINE_PER_CPU(int, xmit_recursion);
-EXPORT_SYMBOL(xmit_recursion);
-
 /**
  *     dev_loopback_xmit - loop back @skb
  *     @net: network namespace this loopback is happening in
@@ -3857,8 +3867,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
                int cpu = smp_processor_id(); /* ok because BHs are off */
 
                if (txq->xmit_lock_owner != cpu) {
-                       if (unlikely(__this_cpu_read(xmit_recursion) >
-                                    XMIT_RECURSION_LIMIT))
+                       if (dev_xmit_recursion())
                                goto recursion_alert;
 
                        skb = validate_xmit_skb(skb, dev, &again);
@@ -3868,9 +3877,9 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
                        HARD_TX_LOCK(dev, txq, cpu);
 
                        if (!netif_xmit_stopped(txq)) {
-                               __this_cpu_inc(xmit_recursion);
+                               dev_xmit_recursion_inc();
                                skb = dev_hard_start_xmit(skb, dev, txq, &rc);
-                               __this_cpu_dec(xmit_recursion);
+                               dev_xmit_recursion_dec();
                                if (dev_xmit_complete(rc)) {
                                        HARD_TX_UNLOCK(dev, txq);
                                        goto out;
@@ -5022,8 +5031,10 @@ static inline void __netif_receive_skb_list_ptype(struct list_head *head,
        if (pt_prev->list_func != NULL)
                pt_prev->list_func(head, pt_prev, orig_dev);
        else
-               list_for_each_entry_safe(skb, next, head, list)
+               list_for_each_entry_safe(skb, next, head, list) {
+                       skb_list_del_init(skb);
                        pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
+               }
 }
 
 static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
@@ -7905,13 +7916,20 @@ int dev_get_port_parent_id(struct net_device *dev,
        struct netdev_phys_item_id first = { };
        struct net_device *lower_dev;
        struct list_head *iter;
-       int err = -EOPNOTSUPP;
+       int err;
+
+       if (ops->ndo_get_port_parent_id) {
+               err = ops->ndo_get_port_parent_id(dev, ppid);
+               if (err != -EOPNOTSUPP)
+                       return err;
+       }
 
-       if (ops->ndo_get_port_parent_id)
-               return ops->ndo_get_port_parent_id(dev, ppid);
+       err = devlink_compat_switch_id_get(dev, ppid);
+       if (!err || err != -EOPNOTSUPP)
+               return err;
 
        if (!recurse)
-               return err;
+               return -EOPNOTSUPP;
 
        netdev_for_each_lower_dev(dev, lower_dev, iter) {
                err = dev_get_port_parent_id(lower_dev, ppid, recurse);
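dev_xmit_recursion()/_inc()/_dec() replace the open-coded per-cpu xmit_recursion counter removed above. Elsewhere in this series they are thin per-CPU wrappers, roughly as below (reconstructed sketch; include/linux/netdevice.h is authoritative):

	static inline bool dev_xmit_recursion(void)
	{
		return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
				XMIT_RECURSION_LIMIT);
	}

	static inline void dev_xmit_recursion_inc(void)
	{
		__this_cpu_inc(softnet_data.xmit.recursion);
	}

	static inline void dev_xmit_recursion_dec(void)
	{
		__this_cpu_dec(softnet_data.xmit.recursion);
	}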
index dc3a99148ee791ec9b406c57761544484dc1e342..b2715a187a1107b6a797d24137dba6c284f86acc 100644 (file)
@@ -5358,24 +5358,38 @@ static void __devlink_port_type_set(struct devlink_port *devlink_port,
 void devlink_port_type_eth_set(struct devlink_port *devlink_port,
                               struct net_device *netdev)
 {
+       const struct net_device_ops *ops = netdev->netdev_ops;
+
        /* If driver registers devlink port, it should set devlink port
         * attributes accordingly so the compat functions are called
         * and the original ops are not used.
         */
-       if (netdev->netdev_ops->ndo_get_phys_port_name) {
+       if (ops->ndo_get_phys_port_name) {
                /* Some drivers use the same set of ndos for netdevs
                 * that have devlink_port registered and also for
                 * those who don't. Make sure that ndo_get_phys_port_name
                 * returns -EOPNOTSUPP here in case it is defined.
                 * Warn if not.
                 */
-               const struct net_device_ops *ops = netdev->netdev_ops;
                char name[IFNAMSIZ];
                int err;
 
                err = ops->ndo_get_phys_port_name(netdev, name, sizeof(name));
                WARN_ON(err != -EOPNOTSUPP);
        }
+       if (ops->ndo_get_port_parent_id) {
+               /* Some drivers use the same set of ndos for netdevs
+                * that have devlink_port registered and also for
+                * those who don't. Make sure that ndo_get_port_parent_id
+                * returns -EOPNOTSUPP here in case it is defined.
+                * Warn if not.
+                */
+               struct netdev_phys_item_id ppid;
+               int err;
+
+               err = ops->ndo_get_port_parent_id(netdev, &ppid);
+               WARN_ON(err != -EOPNOTSUPP);
+       }
        __devlink_port_type_set(devlink_port, DEVLINK_PORT_TYPE_ETH, netdev);
 }
 EXPORT_SYMBOL_GPL(devlink_port_type_eth_set);
@@ -5414,11 +5428,16 @@ EXPORT_SYMBOL_GPL(devlink_port_type_clear);
  *     @split: indicates if this is split port
  *     @split_subport_number: if the port is split, this is the number
  *                            of subport.
+ *     @switch_id: if the port is part of a switch, this is a buffer with the
+ *                 ID, otherwise this is NULL
+ *     @switch_id_len: length of the switch_id buffer
  */
 void devlink_port_attrs_set(struct devlink_port *devlink_port,
                            enum devlink_port_flavour flavour,
                            u32 port_number, bool split,
-                           u32 split_subport_number)
+                           u32 split_subport_number,
+                           const unsigned char *switch_id,
+                           unsigned char switch_id_len)
 {
        struct devlink_port_attrs *attrs = &devlink_port->attrs;
 
@@ -5429,6 +5448,15 @@ void devlink_port_attrs_set(struct devlink_port *devlink_port,
        attrs->port_number = port_number;
        attrs->split = split;
        attrs->split_subport_number = split_subport_number;
+       if (switch_id) {
+               attrs->switch_port = true;
+               if (WARN_ON(switch_id_len > MAX_PHYS_ITEM_ID_LEN))
+                       switch_id_len = MAX_PHYS_ITEM_ID_LEN;
+               memcpy(attrs->switch_id.id, switch_id, switch_id_len);
+               attrs->switch_id.id_len = switch_id_len;
+       } else {
+               attrs->switch_port = false;
+       }
 }
 EXPORT_SYMBOL_GPL(devlink_port_attrs_set);
 
@@ -6494,6 +6522,25 @@ int devlink_compat_phys_port_name_get(struct net_device *dev,
        return __devlink_port_phys_port_name_get(devlink_port, name, len);
 }
 
+int devlink_compat_switch_id_get(struct net_device *dev,
+                                struct netdev_phys_item_id *ppid)
+{
+       struct devlink_port *devlink_port;
+
+       /* RTNL mutex is held here which ensures that devlink_port
+        * instance cannot disappear in the middle. No need to take
+        * any devlink lock as only permanent values are accessed.
+        */
+       ASSERT_RTNL();
+       devlink_port = netdev_to_devlink_port(dev);
+       if (!devlink_port || !devlink_port->attrs.switch_port)
+               return -EOPNOTSUPP;
+
+       memcpy(ppid, &devlink_port->attrs.switch_id, sizeof(*ppid));
+
+       return 0;
+}
+
 static int __init devlink_init(void)
 {
        return genl_register_family(&devlink_nl_family);
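devlink_port_attrs_set() grows the switch_id argument so that devlink_compat_switch_id_get() can answer dev_get_port_parent_id() for drivers that never implement the ndo. A hedged driver-side sketch of the extended call, with hypothetical names (compare the real DSA caller further down):

	static int demo_register_port(struct devlink *dl,
				      struct devlink_port *dl_port,
				      unsigned int port_number)
	{
		static const unsigned char demo_switch_id[] = {
			0x00, 0x01, 0x02, 0x03,
		};

		devlink_port_attrs_set(dl_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
				       port_number, false, 0,
				       demo_switch_id, sizeof(demo_switch_id));
		return devlink_port_register(dl, dl_port, port_number);
	}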
index 387d67eb75ab64ab09c9ad780db1aceaa62dd925..4a593853cbf283832d57ff085690d9110a1eee1e 100644 (file)
@@ -1798,11 +1798,16 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
        WARN_ON_ONCE(!ret);
 
        gstrings.len = ret;
-       data = vzalloc(array_size(gstrings.len, ETH_GSTRING_LEN));
-       if (gstrings.len && !data)
-               return -ENOMEM;
 
-       __ethtool_get_strings(dev, gstrings.string_set, data);
+       if (gstrings.len) {
+               data = vzalloc(array_size(gstrings.len, ETH_GSTRING_LEN));
+               if (!data)
+                       return -ENOMEM;
+
+               __ethtool_get_strings(dev, gstrings.string_set, data);
+       } else {
+               data = NULL;
+       }
 
        ret = -EFAULT;
        if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
@@ -1898,11 +1903,15 @@ static int ethtool_get_stats(struct net_device *dev, void __user *useraddr)
                return -EFAULT;
 
        stats.n_stats = n_stats;
-       data = vzalloc(array_size(n_stats, sizeof(u64)));
-       if (n_stats && !data)
-               return -ENOMEM;
 
-       ops->get_ethtool_stats(dev, &stats, data);
+       if (n_stats) {
+               data = vzalloc(array_size(n_stats, sizeof(u64)));
+               if (!data)
+                       return -ENOMEM;
+               ops->get_ethtool_stats(dev, &stats, data);
+       } else {
+               data = NULL;
+       }
 
        ret = -EFAULT;
        if (copy_to_user(useraddr, &stats, sizeof(stats)))
@@ -1942,16 +1951,21 @@ static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr)
                return -EFAULT;
 
        stats.n_stats = n_stats;
-       data = vzalloc(array_size(n_stats, sizeof(u64)));
-       if (n_stats && !data)
-               return -ENOMEM;
 
-       if (dev->phydev && !ops->get_ethtool_phy_stats) {
-               ret = phy_ethtool_get_stats(dev->phydev, &stats, data);
-               if (ret < 0)
-                       return ret;
+       if (n_stats) {
+               data = vzalloc(array_size(n_stats, sizeof(u64)));
+               if (!data)
+                       return -ENOMEM;
+
+               if (dev->phydev && !ops->get_ethtool_phy_stats) {
+                       ret = phy_ethtool_get_stats(dev->phydev, &stats, data);
+                       if (ret < 0)
+                               goto out;
+               } else {
+                       ops->get_ethtool_phy_stats(dev, &stats, data);
+               }
        } else {
-               ops->get_ethtool_phy_stats(dev, &stats, data);
+               data = NULL;
        }
 
        ret = -EFAULT;
index 4a92a98ccce9a0570cdc75c66180db2f7305073f..b5cd3c727285d7a1738118c246abce8d31dac08f 100644 (file)
@@ -80,14 +80,14 @@ static int failover_slave_register(struct net_device *slave_dev)
                goto err_upper_link;
        }
 
-       slave_dev->priv_flags |= IFF_FAILOVER_SLAVE;
+       slave_dev->priv_flags |= (IFF_FAILOVER_SLAVE | IFF_LIVE_RENAME_OK);
 
        if (fops && fops->slave_register &&
            !fops->slave_register(slave_dev, failover_dev))
                return NOTIFY_OK;
 
        netdev_upper_dev_unlink(slave_dev, failover_dev);
-       slave_dev->priv_flags &= ~IFF_FAILOVER_SLAVE;
+       slave_dev->priv_flags &= ~(IFF_FAILOVER_SLAVE | IFF_LIVE_RENAME_OK);
 err_upper_link:
        netdev_rx_handler_unregister(slave_dev);
 done:
@@ -121,7 +121,7 @@ int failover_slave_unregister(struct net_device *slave_dev)
 
        netdev_rx_handler_unregister(slave_dev);
        netdev_upper_dev_unlink(slave_dev, failover_dev);
-       slave_dev->priv_flags &= ~IFF_FAILOVER_SLAVE;
+       slave_dev->priv_flags &= ~(IFF_FAILOVER_SLAVE | IFF_LIVE_RENAME_OK);
 
        if (fops && fops->slave_unregister &&
            !fops->slave_unregister(slave_dev, failover_dev))
index 4a8455757507c49965c234bd4b00e1bfe7f00beb..1644a16afceca4315071f18af594ccc90421b527 100644 (file)
@@ -2016,7 +2016,7 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
 {
        int ret;
 
-       if (unlikely(__this_cpu_read(xmit_recursion) > XMIT_RECURSION_LIMIT)) {
+       if (dev_xmit_recursion()) {
                net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
                kfree_skb(skb);
                return -ENETDOWN;
@@ -2024,9 +2024,9 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
 
        skb->dev = dev;
 
-       __this_cpu_inc(xmit_recursion);
+       dev_xmit_recursion_inc();
        ret = dev_queue_xmit(skb);
-       __this_cpu_dec(xmit_recursion);
+       dev_xmit_recursion_dec();
 
        return ret;
 }
@@ -2970,11 +2970,14 @@ static u32 bpf_skb_net_base_len(const struct sk_buff *skb)
 #define BPF_F_ADJ_ROOM_MASK            (BPF_F_ADJ_ROOM_FIXED_GSO | \
                                         BPF_F_ADJ_ROOM_ENCAP_L3_MASK | \
                                         BPF_F_ADJ_ROOM_ENCAP_L4_GRE | \
-                                        BPF_F_ADJ_ROOM_ENCAP_L4_UDP)
+                                        BPF_F_ADJ_ROOM_ENCAP_L4_UDP | \
+                                        BPF_F_ADJ_ROOM_ENCAP_L2( \
+                                         BPF_ADJ_ROOM_ENCAP_L2_MASK))
 
 static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff,
                            u64 flags)
 {
+       u8 inner_mac_len = flags >> BPF_ADJ_ROOM_ENCAP_L2_SHIFT;
        bool encap = flags & BPF_F_ADJ_ROOM_ENCAP_L3_MASK;
        u16 mac_len = 0, inner_net = 0, inner_trans = 0;
        unsigned int gso_type = SKB_GSO_DODGY;
@@ -3009,6 +3012,8 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff,
 
                mac_len = skb->network_header - skb->mac_header;
                inner_net = skb->network_header;
+               if (inner_mac_len > len_diff)
+                       return -EINVAL;
                inner_trans = skb->transport_header;
        }
 
@@ -3017,8 +3022,7 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff,
                return ret;
 
        if (encap) {
-               /* inner mac == inner_net on l3 encap */
-               skb->inner_mac_header = inner_net;
+               skb->inner_mac_header = inner_net - inner_mac_len;
                skb->inner_network_header = inner_net;
                skb->inner_transport_header = inner_trans;
                skb_set_inner_protocol(skb, skb->protocol);
@@ -3032,7 +3036,7 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff,
                        gso_type |= SKB_GSO_GRE;
                else if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)
                        gso_type |= SKB_GSO_IPXIP6;
-               else
+               else if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4)
                        gso_type |= SKB_GSO_IPXIP4;
 
                if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE ||
@@ -4458,6 +4462,8 @@ BPF_CALL_3(bpf_bind, struct bpf_sock_addr_kern *, ctx, struct sockaddr *, addr,
         * Only binding to IP is supported.
         */
        err = -EINVAL;
+       if (addr_len < offsetofend(struct sockaddr, sa_family))
+               return err;
        if (addr->sa_family == AF_INET) {
                if (addr_len < sizeof(struct sockaddr_in))
                        return err;
@@ -4555,11 +4561,11 @@ static int bpf_fib_set_fwd_params(struct bpf_fib_lookup *params,
 static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
                               u32 flags, bool check_mtu)
 {
+       struct fib_nh_common *nhc;
        struct in_device *in_dev;
        struct neighbour *neigh;
        struct net_device *dev;
        struct fib_result res;
-       struct fib_nh *nh;
        struct flowi4 fl4;
        int err;
        u32 mtu;
@@ -4632,22 +4638,33 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
                        return BPF_FIB_LKUP_RET_FRAG_NEEDED;
        }
 
-       nh = &res.fi->fib_nh[res.nh_sel];
+       nhc = res.nhc;
 
        /* do not handle lwt encaps right now */
-       if (nh->fib_nh_lws)
+       if (nhc->nhc_lwtstate)
                return BPF_FIB_LKUP_RET_UNSUPP_LWT;
 
-       dev = nh->fib_nh_dev;
-       if (nh->fib_nh_gw4)
-               params->ipv4_dst = nh->fib_nh_gw4;
+       dev = nhc->nhc_dev;
 
        params->rt_metric = res.fi->fib_priority;
 
        /* xdp and cls_bpf programs are run in RCU-bh so
         * rcu_read_lock_bh is not needed here
         */
-       neigh = __ipv4_neigh_lookup_noref(dev, (__force u32)params->ipv4_dst);
+       if (likely(nhc->nhc_gw_family != AF_INET6)) {
+               if (nhc->nhc_gw_family)
+                       params->ipv4_dst = nhc->nhc_gw.ipv4;
+
+               neigh = __ipv4_neigh_lookup_noref(dev,
+                                                (__force u32)params->ipv4_dst);
+       } else {
+               struct in6_addr *dst = (struct in6_addr *)params->ipv6_dst;
+
+               params->family = AF_INET6;
+               *dst = nhc->nhc_gw.ipv6;
+               neigh = __ipv6_neigh_lookup_noref_stub(dev, dst);
+       }
+
        if (!neigh)
                return BPF_FIB_LKUP_RET_NO_NEIGH;
 
@@ -4662,12 +4679,12 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
        struct in6_addr *src = (struct in6_addr *) params->ipv6_src;
        struct in6_addr *dst = (struct in6_addr *) params->ipv6_dst;
        struct neighbour *neigh;
+       struct fib6_result res;
        struct net_device *dev;
        struct inet6_dev *idev;
-       struct fib6_info *f6i;
        struct flowi6 fl6;
        int strict = 0;
-       int oif;
+       int oif, err;
        u32 mtu;
 
        /* link local addresses are never forwarded */
@@ -4709,61 +4726,57 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
                if (unlikely(!tb))
                        return BPF_FIB_LKUP_RET_NOT_FWDED;
 
-               f6i = ipv6_stub->fib6_table_lookup(net, tb, oif, &fl6, strict);
+               err = ipv6_stub->fib6_table_lookup(net, tb, oif, &fl6, &res,
+                                                  strict);
        } else {
                fl6.flowi6_mark = 0;
                fl6.flowi6_secid = 0;
                fl6.flowi6_tun_key.tun_id = 0;
                fl6.flowi6_uid = sock_net_uid(net, NULL);
 
-               f6i = ipv6_stub->fib6_lookup(net, oif, &fl6, strict);
+               err = ipv6_stub->fib6_lookup(net, oif, &fl6, &res, strict);
        }
 
-       if (unlikely(IS_ERR_OR_NULL(f6i) || f6i == net->ipv6.fib6_null_entry))
+       if (unlikely(err || IS_ERR_OR_NULL(res.f6i) ||
+                    res.f6i == net->ipv6.fib6_null_entry))
                return BPF_FIB_LKUP_RET_NOT_FWDED;
 
-       if (unlikely(f6i->fib6_flags & RTF_REJECT)) {
-               switch (f6i->fib6_type) {
-               case RTN_BLACKHOLE:
-                       return BPF_FIB_LKUP_RET_BLACKHOLE;
-               case RTN_UNREACHABLE:
-                       return BPF_FIB_LKUP_RET_UNREACHABLE;
-               case RTN_PROHIBIT:
-                       return BPF_FIB_LKUP_RET_PROHIBIT;
-               default:
-                       return BPF_FIB_LKUP_RET_NOT_FWDED;
-               }
-       }
-
-       if (f6i->fib6_type != RTN_UNICAST)
+       switch (res.fib6_type) {
+       /* only unicast is forwarded */
+       case RTN_UNICAST:
+               break;
+       case RTN_BLACKHOLE:
+               return BPF_FIB_LKUP_RET_BLACKHOLE;
+       case RTN_UNREACHABLE:
+               return BPF_FIB_LKUP_RET_UNREACHABLE;
+       case RTN_PROHIBIT:
+               return BPF_FIB_LKUP_RET_PROHIBIT;
+       default:
                return BPF_FIB_LKUP_RET_NOT_FWDED;
+       }
 
-       if (f6i->fib6_nsiblings && fl6.flowi6_oif == 0)
-               f6i = ipv6_stub->fib6_multipath_select(net, f6i, &fl6,
-                                                      fl6.flowi6_oif, NULL,
-                                                      strict);
+       ipv6_stub->fib6_select_path(net, &res, &fl6, fl6.flowi6_oif,
+                                   fl6.flowi6_oif != 0, NULL, strict);
 
        if (check_mtu) {
-               mtu = ipv6_stub->ip6_mtu_from_fib6(f6i, dst, src);
+               mtu = ipv6_stub->ip6_mtu_from_fib6(&res, dst, src);
                if (params->tot_len > mtu)
                        return BPF_FIB_LKUP_RET_FRAG_NEEDED;
        }
 
-       if (f6i->fib6_nh.fib_nh_lws)
+       if (res.nh->fib_nh_lws)
                return BPF_FIB_LKUP_RET_UNSUPP_LWT;
 
-       if (f6i->fib6_nh.fib_nh_has_gw)
-               *dst = f6i->fib6_nh.fib_nh_gw6;
+       if (res.nh->fib_nh_gw_family)
+               *dst = res.nh->fib_nh_gw6;
 
-       dev = f6i->fib6_nh.fib_nh_dev;
-       params->rt_metric = f6i->fib6_metric;
+       dev = res.nh->fib_nh_dev;
+       params->rt_metric = res.f6i->fib6_metric;
 
        /* xdp and cls_bpf programs are run in RCU-bh so rcu_read_lock_bh is
-        * not needed here. Can not use __ipv6_neigh_lookup_noref here
-        * because we need to get nd_tbl via the stub
+        * not needed here.
         */
-       neigh = ___neigh_lookup_noref(ipv6_stub->nd_tbl, neigh_key_eq128,
-                                     ndisc_hashfn, dst, dev);
+       neigh = __ipv6_neigh_lookup_noref_stub(dev, dst);
        if (!neigh)
                return BPF_FIB_LKUP_RET_NO_NEIGH;
 
@@ -6862,14 +6875,8 @@ static bool flow_dissector_is_valid_access(int off, int size,
                                           const struct bpf_prog *prog,
                                           struct bpf_insn_access_aux *info)
 {
-       if (type == BPF_WRITE) {
-               switch (off) {
-               case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
-                       break;
-               default:
-                       return false;
-               }
-       }
+       if (type == BPF_WRITE)
+               return false;
 
        switch (off) {
        case bpf_ctx_range(struct __sk_buff, data):
@@ -6881,11 +6888,7 @@ static bool flow_dissector_is_valid_access(int off, int size,
        case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
                info->reg_type = PTR_TO_FLOW_KEYS;
                break;
-       case bpf_ctx_range(struct __sk_buff, tc_classid):
-       case bpf_ctx_range(struct __sk_buff, data_meta):
-       case bpf_ctx_range_till(struct __sk_buff, family, local_port):
-       case bpf_ctx_range(struct __sk_buff, tstamp):
-       case bpf_ctx_range(struct __sk_buff, wire_len):
+       default:
                return false;
        }
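Tying the filter.c changes together from the BPF side: the new BPF_F_ADJ_ROOM_ENCAP_L2(len) flag lets a tc program reserve room for an inner Ethernet header (len must not exceed len_diff, per the check above), so inner_mac_header lands in front of inner_network_header. A hedged tc-BPF sketch; section name and include paths follow recent libbpf conventions and are illustrative only:

	#include <linux/bpf.h>
	#include <linux/if_ether.h>
	#include <linux/ip.h>
	#include <linux/udp.h>
	#include <linux/pkt_cls.h>
	#include <bpf/bpf_helpers.h>

	SEC("classifier")
	int demo_encap(struct __sk_buff *skb)
	{
		__u64 flags = BPF_F_ADJ_ROOM_FIXED_GSO |
			      BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 |
			      BPF_F_ADJ_ROOM_ENCAP_L4_UDP |
			      BPF_F_ADJ_ROOM_ENCAP_L2(ETH_HLEN);
		__u32 len_diff = sizeof(struct iphdr) + sizeof(struct udphdr) +
				 ETH_HLEN;

		/* grow headroom for outer IPv4 + UDP + inner Ethernet */
		if (bpf_skb_adjust_room(skb, len_diff, BPF_ADJ_ROOM_MAC, flags))
			return TC_ACT_SHOT;
		/* the outer headers are then written with bpf_skb_store_bytes() */
		return TC_ACT_OK;
	}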
 
index b4d581134ef203030d72df2462e837297cff7d56..795449713ba4a100f0260fa9c08177efbe6fe9fd 100644 (file)
@@ -707,6 +707,7 @@ bool __skb_flow_bpf_dissect(struct bpf_prog *prog,
        /* Pass parameters to the BPF program */
        memset(flow_keys, 0, sizeof(*flow_keys));
        cb->qdisc_cb.flow_keys = flow_keys;
+       flow_keys->n_proto = skb->protocol;
        flow_keys->nhoff = skb_network_offset(skb);
        flow_keys->thoff = flow_keys->nhoff;
 
@@ -716,7 +717,8 @@ bool __skb_flow_bpf_dissect(struct bpf_prog *prog,
        /* Restore state */
        memcpy(cb, &cb_saved, sizeof(cb_saved));
 
-       flow_keys->nhoff = clamp_t(u16, flow_keys->nhoff, 0, skb->len);
+       flow_keys->nhoff = clamp_t(u16, flow_keys->nhoff,
+                                  skb_network_offset(skb), skb->len);
        flow_keys->thoff = clamp_t(u16, flow_keys->thoff,
                                   flow_keys->nhoff, skb->len);
 
index ac679f74ba4754e69c51dd9b3fb9b23611782300..9bf1b9ad17806dfaa579317408e5c4707d014cc0 100644 (file)
@@ -291,6 +291,7 @@ __gnet_stats_copy_queue_cpu(struct gnet_stats_queue *qstats,
        for_each_possible_cpu(i) {
                const struct gnet_stats_queue *qcpu = per_cpu_ptr(q, i);
 
+               qstats->qlen = 0;
                qstats->backlog += qcpu->backlog;
                qstats->drops += qcpu->drops;
                qstats->requeues += qcpu->requeues;
@@ -306,6 +307,7 @@ void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
        if (cpu) {
                __gnet_stats_copy_queue_cpu(qstats, cpu);
        } else {
+               qstats->qlen = q->qlen;
                qstats->backlog = q->backlog;
                qstats->drops = q->drops;
                qstats->requeues = q->requeues;
index 30f6fd8f68e0dc42801686ede3886f366ee1732b..997cfa8f99ba9c1f259e3958e27cf5392e41745c 100644 (file)
@@ -1920,6 +1920,11 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
                goto out;
        }
 
+       if (tbl->allow_add && !tbl->allow_add(dev, extack)) {
+               err = -EINVAL;
+               goto out;
+       }
+
        neigh = neigh_lookup(tbl, dst, dev);
        if (neigh == NULL) {
                bool exempt_from_gc;
index c14f0dc0157c77c879553c178cd395404a967c6c..e4fd68389d6f39adb34e383617d5f06c11a84ab1 100644 (file)
@@ -1747,20 +1747,16 @@ int netdev_register_kobject(struct net_device *ndev)
 
        error = device_add(dev);
        if (error)
-               goto error_put_device;
+               return error;
 
        error = register_queue_kobjects(ndev);
-       if (error)
-               goto error_device_del;
+       if (error) {
+               device_del(dev);
+               return error;
+       }
 
        pm_runtime_set_memalloc_noio(dev, true);
 
-       return 0;
-
-error_device_del:
-       device_del(dev);
-error_put_device:
-       put_device(dev);
        return error;
 }
 
index 17f36317363d19dcdeb6a6e75a116220b078c2b0..ebb5b6d21a13f6f2ede5237946815c76a8d391f8 100644 (file)
@@ -304,6 +304,7 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
 
        refcount_set(&net->count, 1);
        refcount_set(&net->passive, 1);
+       get_random_bytes(&net->hash_mix, sizeof(u32));
        net->dev_base_seq = 1;
        net->user_ns = user_ns;
        idr_init(&net->netns_ids);
@@ -838,7 +839,7 @@ static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh,
                peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
                nla = tb[NETNSA_FD];
        } else if (tb[NETNSA_NSID]) {
-               peer = get_net_ns_by_id(net, nla_get_u32(tb[NETNSA_NSID]));
+               peer = get_net_ns_by_id(net, nla_get_s32(tb[NETNSA_NSID]));
                if (!peer)
                        peer = ERR_PTR(-ENOENT);
                nla = tb[NETNSA_NSID];
index 703cf76aa7c2dee7c5b556f5f035c015780f55f0..7109c168b5e0fb20b8b6ad8951893b181803fad8 100644 (file)
@@ -185,9 +185,10 @@ void __init ptp_classifier_init(void)
                { 0x16,  0,  0, 0x00000000 },
                { 0x06,  0,  0, 0x00000000 },
        };
-       struct sock_fprog_kern ptp_prog = {
-               .len = ARRAY_SIZE(ptp_filter), .filter = ptp_filter,
-       };
+       struct sock_fprog_kern ptp_prog;
+
+       ptp_prog.len = ARRAY_SIZE(ptp_filter);
+       ptp_prog.filter = ptp_filter;
 
        BUG_ON(bpf_prog_create(&ptp_insns, &ptp_prog));
 }
index f9b964fd4e4d1ed2cc5f8ea9627d082eefb89417..5fa5bf3e9945babef497d30396cb8f7b9832ee3b 100644 (file)
@@ -4951,7 +4951,7 @@ static int rtnl_valid_stats_req(const struct nlmsghdr *nlh, bool strict_check,
 {
        struct if_stats_msg *ifsm;
 
-       if (nlh->nlmsg_len < sizeof(*ifsm)) {
+       if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifsm))) {
                NL_SET_ERR_MSG(extack, "Invalid header for stats dump");
                return -EINVAL;
        }
index 4782f9354dd18fa8c55e02b79cd633199d189006..a083e188374f5d93c37449871fc3c157593d7720 100644 (file)
@@ -3800,7 +3800,7 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
        unsigned int delta_truesize;
        struct sk_buff *lp;
 
-       if (unlikely(p->len + len >= 65536))
+       if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush))
                return -E2BIG;
 
        lp = NAPI_GRO_CB(p)->last;
@@ -5082,7 +5082,8 @@ EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len);
 
 static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
 {
-       int mac_len;
+       int mac_len, meta_len;
+       void *meta;
 
        if (skb_cow(skb, skb_headroom(skb)) < 0) {
                kfree_skb(skb);
@@ -5094,6 +5095,13 @@ static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
                memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
                        mac_len - VLAN_HLEN - ETH_TLEN);
        }
+
+       meta_len = skb_metadata_len(skb);
+       if (meta_len) {
+               meta = skb_metadata_end(skb) - meta_len;
+               memmove(meta + VLAN_HLEN, meta, meta_len);
+       }
+
        skb->mac_header += VLAN_HLEN;
        return skb;
 }
index 782343bb925b643348cc906a70b97caa0388178d..067878a1e4c51363e065e13ccdb2b9d03c6a9c5f 100644 (file)
@@ -348,7 +348,7 @@ static int sock_get_timeout(long timeo, void *optval, bool old_timeval)
                tv.tv_usec = ((timeo % HZ) * USEC_PER_SEC) / HZ;
        }
 
-       if (in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
+       if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
                struct old_timeval32 tv32 = { tv.tv_sec, tv.tv_usec };
                *(struct old_timeval32 *)optval = tv32;
                return sizeof(tv32);
@@ -372,7 +372,7 @@ static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen, bool
 {
        struct __kernel_sock_timeval tv;
 
-       if (in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
+       if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
                struct old_timeval32 tv32;
 
                if (optlen < sizeof(tv32))
index f227f002c73d382fecd98c8857ce4c9139cb7a8a..db87d9f5801983913e66549e5d5911ead10f3ac1 100644 (file)
@@ -738,7 +738,12 @@ static int __feat_register_sp(struct list_head *fn, u8 feat, u8 is_local,
        if (dccp_feat_clone_sp_val(&fval, sp_val, sp_len))
                return -ENOMEM;
 
-       return dccp_feat_push_change(fn, feat, is_local, mandatory, &fval);
+       if (dccp_feat_push_change(fn, feat, is_local, mandatory, &fval)) {
+               kfree(fval.sp.vec);
+               return -ENOMEM;
+       }
+
+       return 0;
 }
 
 /**
index bdccc46a2921924cd948d4fdc5ccbd04ef63f5a7..c1fa4785c4c255a04023e35e88eb2e15c96f6a3d 100644 (file)
@@ -444,7 +444,7 @@ static void dn_destruct(struct sock *sk)
        skb_queue_purge(&scp->other_xmit_queue);
        skb_queue_purge(&scp->other_receive_queue);
 
-       dst_release(rcu_dereference_check(sk->sk_dst_cache, 1));
+       dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1));
 }
 
 static unsigned long dn_memory_pressure;
index 76338c38738a748aeaa899ebf30247c0f69741a8..19aa32fc1802b44a971eecd6f7b9f967ec2900d9 100644 (file)
@@ -94,8 +94,6 @@ int dns_query(const char *type, const char *name, size_t namelen,
                desclen += typelen + 1;
        }
 
-       if (!namelen)
-               namelen = strnlen(name, 256);
        if (namelen < 3 || namelen > 255)
                return -EINVAL;
        desclen += namelen + 1;
index fe0a6197db9c5f8ab2e5ff4b2d2271dc212b6d76..d122f1bcdab2d054a829f6a0c4149c759be13994 100644 (file)
@@ -260,12 +260,14 @@ static int dsa_port_setup(struct dsa_port *dp)
 {
        enum devlink_port_flavour flavour;
        struct dsa_switch *ds = dp->ds;
+       struct dsa_switch_tree *dst = ds->dst;
        int err;
 
        if (dp->type == DSA_PORT_TYPE_UNUSED)
                return 0;
 
        memset(&dp->devlink_port, 0, sizeof(dp->devlink_port));
+       dp->mac = of_get_mac_address(dp->dn);
 
        switch (dp->type) {
        case DSA_PORT_TYPE_CPU:
@@ -285,7 +287,8 @@ static int dsa_port_setup(struct dsa_port *dp)
         * independent from front panel port numbers.
         */
        devlink_port_attrs_set(&dp->devlink_port, flavour,
-                              dp->index, false, 0);
+                              dp->index, false, 0,
+                              (const char *) &dst->index, sizeof(dst->index));
        err = devlink_port_register(ds->devlink, &dp->devlink_port,
                                    dp->index);
        if (err)
index 80be8e86c82df5da1edd984cdbcde09d27cfe68d..ce26dddc82707a6e90a1b8e02d8f175d34201ae8 100644 (file)
@@ -379,6 +379,13 @@ static int dsa_slave_get_port_parent_id(struct net_device *dev,
        struct dsa_switch *ds = dp->ds;
        struct dsa_switch_tree *dst = ds->dst;
 
+       /* For non-legacy ports, devlink is used and it takes
+        * care of the name generation. This ndo implementation
+        * should be removed with legacy support.
+        */
+       if (dp->ds->devlink)
+               return -EOPNOTSUPP;
+
        ppid->id_len = sizeof(dst->index);
        memcpy(&ppid->id, &dst->index, ppid->id_len);
 
@@ -1393,7 +1400,10 @@ int dsa_slave_create(struct dsa_port *port)
                                NETIF_F_HW_VLAN_CTAG_FILTER;
        slave_dev->hw_features |= NETIF_F_HW_TC;
        slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
-       eth_hw_addr_inherit(slave_dev, master);
+       if (port->mac && is_valid_ether_addr(port->mac))
+               ether_addr_copy(slave_dev->dev_addr, port->mac);
+       else
+               eth_hw_addr_inherit(slave_dev, master);
        slave_dev->priv_flags |= IFF_NO_QUEUE;
        slave_dev->netdev_ops = &dsa_slave_netdev_ops;
        slave_dev->min_mtu = 0;
index ed4f6dc26365baa3e9988b2f11ac26d8ffeb55b7..85c22ada47449d580ee2a175c0729f4bad2cad61 100644 (file)
@@ -98,8 +98,18 @@ static struct sk_buff *qca_tag_rcv(struct sk_buff *skb, struct net_device *dev,
        return skb;
 }
 
+static int qca_tag_flow_dissect(const struct sk_buff *skb, __be16 *proto,
+                                int *offset)
+{
+       *offset = QCA_HDR_LEN;
+       *proto = ((__be16 *)skb->data)[0];
+
+       return 0;
+}
+
 const struct dsa_device_ops qca_netdev_ops = {
        .xmit   = qca_tag_xmit,
        .rcv    = qca_tag_rcv,
+       .flow_dissect = qca_tag_flow_dissect,
        .overhead = QCA_HDR_LEN,
 };
index 9ae972a820f4683efb16eb8fd2c5d69e75c12049..e45757fc477f0b26092e4bb5fe8f1cccd3ea733a 100644 (file)
@@ -6,3 +6,4 @@ obj-$(CONFIG_HSR)       += hsr.o
 
 hsr-y                  := hsr_main.o hsr_framereg.o hsr_device.o \
                           hsr_netlink.o hsr_slave.o hsr_forward.o
+hsr-$(CONFIG_DEBUG_FS) += hsr_debugfs.o
diff --git a/net/hsr/hsr_debugfs.c b/net/hsr/hsr_debugfs.c
new file mode 100644 (file)
index 0000000..9444797
--- /dev/null
@@ -0,0 +1,119 @@
+/*
+ * hsr_debugfs code
+ * Copyright (C) 2019 Texas Instruments Incorporated
+ *
+ * Author(s):
+ *     Murali Karicheri <m-karicheri2@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/debugfs.h>
+#include "hsr_main.h"
+#include "hsr_framereg.h"
+
+static void print_mac_address(struct seq_file *sfp, unsigned char *mac)
+{
+       seq_printf(sfp, "%02x:%02x:%02x:%02x:%02x:%02x:",
+                  mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
+}
+
+/* hsr_node_table_show - Formats and prints node_table entries */
+static int
+hsr_node_table_show(struct seq_file *sfp, void *data)
+{
+       struct hsr_priv *priv = (struct hsr_priv *)sfp->private;
+       struct hsr_node *node;
+
+       seq_puts(sfp, "Node Table entries\n");
+       seq_puts(sfp, "MAC-Address-A,   MAC-Address-B, time_in[A], ");
+       seq_puts(sfp, "time_in[B], Address-B port\n");
+       rcu_read_lock();
+       list_for_each_entry_rcu(node, &priv->node_db, mac_list) {
+               /* skip self node */
+               if (hsr_addr_is_self(priv, node->macaddress_A))
+                       continue;
+               print_mac_address(sfp, &node->macaddress_A[0]);
+               seq_puts(sfp, " ");
+               print_mac_address(sfp, &node->macaddress_B[0]);
+               seq_printf(sfp, "0x%lx, ", node->time_in[HSR_PT_SLAVE_A]);
+               seq_printf(sfp, "0x%lx ", node->time_in[HSR_PT_SLAVE_B]);
+               seq_printf(sfp, "0x%x\n", node->addr_B_port);
+       }
+       rcu_read_unlock();
+       return 0;
+}
+
+/* hsr_node_table_open - Open the node_table file
+ *
+ * Description:
+ * This routine opens the node_table debugfs file of a specific hsr device
+ */
+static int
+hsr_node_table_open(struct inode *inode, struct file *filp)
+{
+       return single_open(filp, hsr_node_table_show, inode->i_private);
+}
+
+static const struct file_operations hsr_fops = {
+       .owner  = THIS_MODULE,
+       .open   = hsr_node_table_open,
+       .read   = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+
+/* hsr_debugfs_init - create hsr node_table file for dumping
+ * the node table
+ *
+ * Description:
+ * When debugfs is configured, this routine sets up the node_table file per
+ * hsr device for dumping the node_table entries
+ */
+int hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev)
+{
+       int rc = -1;
+       struct dentry *de = NULL;
+
+       de = debugfs_create_dir(hsr_dev->name, NULL);
+       if (!de) {
+               pr_err("Cannot create hsr debugfs root\n");
+               return rc;
+       }
+
+       priv->node_tbl_root = de;
+
+       de = debugfs_create_file("node_table", S_IFREG | 0444,
+                                priv->node_tbl_root, priv,
+                                &hsr_fops);
+       if (!de) {
+               pr_err("Cannot create hsr node_table directory\n");
+               return rc;
+       }
+       priv->node_tbl_file = de;
+
+       return 0;
+}
+
+/* hsr_debugfs_term - Tear down debugfs infrastructure
+ *
+ * Description:
+ * When debugfs is configured, this routine removes the debugfs file
+ * system elements that are specific to hsr
+ */
+void
+hsr_debugfs_term(struct hsr_priv *priv)
+{
+       debugfs_remove(priv->node_tbl_file);
+       priv->node_tbl_file = NULL;
+       debugfs_remove(priv->node_tbl_root);
+       priv->node_tbl_root = NULL;
+}
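Once this lands, the table can be read straight from debugfs. A small userspace sketch; the path assumes debugfs mounted at /sys/kernel/debug and an hsr device named "hsr0", both hypothetical here:

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/kernel/debug/hsr0/node_table", "r");
		char line[256];

		if (!f) {
			perror("node_table");
			return 1;
		}
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
		return 0;
	}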
index a97bf326b231fe05c7562ca49279fc3fda9d53c0..15c72065df79103fa78ee47db8f076a62529b055 100644 (file)
@@ -1,9 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright 2011-2014 Autronica Fire and Security AS
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
  *
  * Author(s):
  *     2011-2014 Arvid Brodin, arvid.brodin@alten.se
@@ -23,7 +19,6 @@
 #include "hsr_main.h"
 #include "hsr_forward.h"
 
-
 static bool is_admin_up(struct net_device *dev)
 {
        return dev && (dev->flags & IFF_UP);
@@ -68,7 +63,7 @@ static bool hsr_check_carrier(struct hsr_port *master)
 
        rcu_read_lock();
        hsr_for_each_port(master->hsr, port)
-               if ((port->type != HSR_PT_MASTER) && is_slave_up(port->dev)) {
+               if (port->type != HSR_PT_MASTER && is_slave_up(port->dev)) {
                        has_carrier = true;
                        break;
                }
@@ -82,7 +77,6 @@ static bool hsr_check_carrier(struct hsr_port *master)
        return has_carrier;
 }
 
-
 static void hsr_check_announce(struct net_device *hsr_dev,
                               unsigned char old_operstate)
 {
@@ -90,15 +84,14 @@ static void hsr_check_announce(struct net_device *hsr_dev,
 
        hsr = netdev_priv(hsr_dev);
 
-       if ((hsr_dev->operstate == IF_OPER_UP)
-                       && (old_operstate != IF_OPER_UP)) {
+       if (hsr_dev->operstate == IF_OPER_UP && old_operstate != IF_OPER_UP) {
                /* Went up */
                hsr->announce_count = 0;
                mod_timer(&hsr->announce_timer,
                          jiffies + msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL));
        }
 
-       if ((hsr_dev->operstate != IF_OPER_UP) && (old_operstate == IF_OPER_UP))
+       if (hsr_dev->operstate != IF_OPER_UP && old_operstate == IF_OPER_UP)
                /* Went down */
                del_timer(&hsr->announce_timer);
 }
@@ -136,7 +129,6 @@ int hsr_get_max_mtu(struct hsr_priv *hsr)
        return mtu_max - HSR_HLEN;
 }
 
-
 static int hsr_dev_change_mtu(struct net_device *dev, int new_mtu)
 {
        struct hsr_priv *hsr;
@@ -191,14 +183,12 @@ static int hsr_dev_open(struct net_device *dev)
        return 0;
 }
 
-
 static int hsr_dev_close(struct net_device *dev)
 {
        /* Nothing to do here. */
        return 0;
 }
 
-
 static netdev_features_t hsr_features_recompute(struct hsr_priv *hsr,
                                                netdev_features_t features)
 {
@@ -231,7 +221,6 @@ static netdev_features_t hsr_fix_features(struct net_device *dev,
        return hsr_features_recompute(hsr, features);
 }
 
-
 static int hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct hsr_priv *hsr = netdev_priv(dev);
@@ -244,14 +233,13 @@ static int hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
        return NETDEV_TX_OK;
 }
 
-
 static const struct header_ops hsr_header_ops = {
        .create  = eth_header,
        .parse   = eth_header_parse,
 };
 
 static void send_hsr_supervision_frame(struct hsr_port *master,
-               u8 type, u8 hsrVer)
+                                      u8 type, u8 hsr_ver)
 {
        struct sk_buff *skb;
        int hlen, tlen;
@@ -262,39 +250,38 @@ static void send_hsr_supervision_frame(struct hsr_port *master,
 
        hlen = LL_RESERVED_SPACE(master->dev);
        tlen = master->dev->needed_tailroom;
-       skb = dev_alloc_skb(
-                       sizeof(struct hsr_tag) +
-                       sizeof(struct hsr_sup_tag) +
-                       sizeof(struct hsr_sup_payload) + hlen + tlen);
+       skb = dev_alloc_skb(sizeof(struct hsr_tag) +
+                           sizeof(struct hsr_sup_tag) +
+                           sizeof(struct hsr_sup_payload) + hlen + tlen);
 
-       if (skb == NULL)
+       if (!skb)
                return;
 
        skb_reserve(skb, hlen);
 
        skb->dev = master->dev;
-       skb->protocol = htons(hsrVer ? ETH_P_HSR : ETH_P_PRP);
+       skb->protocol = htons(hsr_ver ? ETH_P_HSR : ETH_P_PRP);
        skb->priority = TC_PRIO_CONTROL;
 
-       if (dev_hard_header(skb, skb->dev, (hsrVer ? ETH_P_HSR : ETH_P_PRP),
+       if (dev_hard_header(skb, skb->dev, (hsr_ver ? ETH_P_HSR : ETH_P_PRP),
                            master->hsr->sup_multicast_addr,
                            skb->dev->dev_addr, skb->len) <= 0)
                goto out;
        skb_reset_mac_header(skb);
 
-       if (hsrVer > 0) {
+       if (hsr_ver > 0) {
                hsr_tag = skb_put(skb, sizeof(struct hsr_tag));
                hsr_tag->encap_proto = htons(ETH_P_PRP);
                set_hsr_tag_LSDU_size(hsr_tag, HSR_V1_SUP_LSDUSIZE);
        }
 
        hsr_stag = skb_put(skb, sizeof(struct hsr_sup_tag));
-       set_hsr_stag_path(hsr_stag, (hsrVer ? 0x0 : 0xf));
-       set_hsr_stag_HSR_Ver(hsr_stag, hsrVer);
+       set_hsr_stag_path(hsr_stag, (hsr_ver ? 0x0 : 0xf));
+       set_hsr_stag_HSR_ver(hsr_stag, hsr_ver);
 
        /* From HSRv1 on we have separate supervision sequence numbers. */
        spin_lock_irqsave(&master->hsr->seqnr_lock, irqflags);
-       if (hsrVer > 0) {
+       if (hsr_ver > 0) {
                hsr_stag->sequence_nr = htons(master->hsr->sup_sequence_nr);
                hsr_tag->sequence_nr = htons(master->hsr->sequence_nr);
                master->hsr->sup_sequence_nr++;
@@ -305,13 +292,14 @@ static void send_hsr_supervision_frame(struct hsr_port *master,
        }
        spin_unlock_irqrestore(&master->hsr->seqnr_lock, irqflags);
 
-       hsr_stag->HSR_TLV_Type = type;
+       hsr_stag->HSR_TLV_type = type;
        /* TODO: Why 12 in HSRv0? */
-       hsr_stag->HSR_TLV_Length = hsrVer ? sizeof(struct hsr_sup_payload) : 12;
+       hsr_stag->HSR_TLV_length =
+                               hsr_ver ? sizeof(struct hsr_sup_payload) : 12;
 
        /* Payload: MacAddressA */
        hsr_sp = skb_put(skb, sizeof(struct hsr_sup_payload));
-       ether_addr_copy(hsr_sp->MacAddressA, master->dev->dev_addr);
+       ether_addr_copy(hsr_sp->macaddress_A, master->dev->dev_addr);
 
        if (skb_put_padto(skb, ETH_ZLEN + HSR_HLEN))
                return;
@@ -324,7 +312,6 @@ static void send_hsr_supervision_frame(struct hsr_port *master,
        kfree_skb(skb);
 }
 
-
 /* Announce (supervision frame) timer function
  */
 static void hsr_announce(struct timer_list *t)
@@ -338,15 +325,15 @@ static void hsr_announce(struct timer_list *t)
        rcu_read_lock();
        master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
 
-       if (hsr->announce_count < 3 && hsr->protVersion == 0) {
+       if (hsr->announce_count < 3 && hsr->prot_version == 0) {
                send_hsr_supervision_frame(master, HSR_TLV_ANNOUNCE,
-                               hsr->protVersion);
+                                          hsr->prot_version);
                hsr->announce_count++;
 
                interval = msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
        } else {
                send_hsr_supervision_frame(master, HSR_TLV_LIFE_CHECK,
-                               hsr->protVersion);
+                                          hsr->prot_version);
 
                interval = msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
        }
@@ -357,7 +344,6 @@ static void hsr_announce(struct timer_list *t)
        rcu_read_unlock();
 }
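
The cadence above: an HSRv0 node bursts three ANNOUNCE supervision frames at the short interval when the ring comes up, then settles into LIFE_CHECK frames at the long interval; HSRv1 nodes send LIFE_CHECK from the start. A minimal userspace sketch of the interval selection, with the millisecond values assumed from HSR_ANNOUNCE_INTERVAL and the roughly two-second HSR_LIFE_CHECK_INTERVAL in hsr_main.h:

    /* Hedged sketch of hsr_announce()'s interval choice; the ms values are
     * assumptions mirroring HSR_ANNOUNCE_INTERVAL and HSR_LIFE_CHECK_INTERVAL.
     */
    static unsigned int next_supervision_interval_ms(int announce_count,
                                                     int prot_version)
    {
            if (announce_count < 3 && prot_version == 0)
                    return 100;     /* ANNOUNCE burst while the ring comes up */
            return 2000;            /* steady-state LIFE_CHECK */
    }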
 
-
 /* According to comments in the declaration of struct net_device, this function
  * is "Called from unregister, can be used to call free_netdev". Ok then...
  */
@@ -368,6 +354,8 @@ static void hsr_dev_destroy(struct net_device *hsr_dev)
 
        hsr = netdev_priv(hsr_dev);
 
+       hsr_debugfs_term(hsr);
+
        rtnl_lock();
        hsr_for_each_port(hsr, port)
                hsr_del_port(port);
@@ -423,7 +411,6 @@ void hsr_dev_setup(struct net_device *dev)
        dev->features |= NETIF_F_NETNS_LOCAL;
 }
 
-
 /* Return true if dev is a HSR master; return false otherwise.
  */
 inline bool is_hsr_master(struct net_device *dev)
@@ -467,7 +454,7 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
        ether_addr_copy(hsr->sup_multicast_addr, def_multicast_addr);
        hsr->sup_multicast_addr[ETH_ALEN - 1] = multicast_spec;
 
-       hsr->protVersion = protocol_version;
+       hsr->prot_version = protocol_version;
 
        /* FIXME: should I modify the value of these?
         *
@@ -498,6 +485,9 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
                goto fail;
 
        mod_timer(&hsr->prune_timer, jiffies + msecs_to_jiffies(PRUNE_PERIOD));
+       res = hsr_debugfs_init(hsr, hsr_dev);
+       if (res)
+               goto fail;
 
        return 0;
 
index 9975e31bbb827b7f7617372a992e596addaaf6c6..6d7759c4f5f98cc0b00a4287bfe03dea066dae5b 100644 (file)
@@ -1,9 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright 2011-2014 Autronica Fire and Security AS
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
  *
  * Author(s):
  *     2011-2014 Arvid Brodin, arvid.brodin@alten.se
index 04b5450c5a5572e875f7900a3676fd80259b9b4b..ddd9605bad04cbe35c5e247a3f8674c3428624ee 100644 (file)
@@ -1,9 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright 2011-2014 Autronica Fire and Security AS
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
  *
  * Author(s):
  *     2011-2014 Arvid Brodin, arvid.brodin@alten.se
@@ -17,7 +13,6 @@
 #include "hsr_main.h"
 #include "hsr_framereg.h"
 
-
 struct hsr_node;
 
 struct hsr_frame_info {
@@ -32,7 +27,6 @@ struct hsr_frame_info {
        bool is_local_exclusive;
 };
 
-
 /* The uses I can see for these HSR supervision frames are:
  * 1) Use the frames that are sent after node initialization ("HSR_TLV.Type =
  *    22") to reset any sequence_nr counters belonging to that node. Useful if
@@ -50,46 +44,45 @@ struct hsr_frame_info {
  */
 static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb)
 {
-       struct ethhdr *ethHdr;
-       struct hsr_sup_tag *hsrSupTag;
-       struct hsrv1_ethhdr_sp *hsrV1Hdr;
+       struct ethhdr *eth_hdr;
+       struct hsr_sup_tag *hsr_sup_tag;
+       struct hsrv1_ethhdr_sp *hsr_V1_hdr;
 
        WARN_ON_ONCE(!skb_mac_header_was_set(skb));
-       ethHdr = (struct ethhdr *) skb_mac_header(skb);
+       eth_hdr = (struct ethhdr *)skb_mac_header(skb);
 
        /* Correct addr? */
-       if (!ether_addr_equal(ethHdr->h_dest,
+       if (!ether_addr_equal(eth_hdr->h_dest,
                              hsr->sup_multicast_addr))
                return false;
 
        /* Correct ether type? */
-       if (!(ethHdr->h_proto == htons(ETH_P_PRP)
-                       || ethHdr->h_proto == htons(ETH_P_HSR)))
+       if (!(eth_hdr->h_proto == htons(ETH_P_PRP) ||
+             eth_hdr->h_proto == htons(ETH_P_HSR)))
                return false;
 
        /* Get the supervision header from correct location. */
-       if (ethHdr->h_proto == htons(ETH_P_HSR)) { /* Okay HSRv1. */
-               hsrV1Hdr = (struct hsrv1_ethhdr_sp *) skb_mac_header(skb);
-               if (hsrV1Hdr->hsr.encap_proto != htons(ETH_P_PRP))
+       if (eth_hdr->h_proto == htons(ETH_P_HSR)) { /* Okay HSRv1. */
+               hsr_V1_hdr = (struct hsrv1_ethhdr_sp *)skb_mac_header(skb);
+               if (hsr_V1_hdr->hsr.encap_proto != htons(ETH_P_PRP))
                        return false;
 
-               hsrSupTag = &hsrV1Hdr->hsr_sup;
+               hsr_sup_tag = &hsr_V1_hdr->hsr_sup;
        } else {
-               hsrSupTag = &((struct hsrv0_ethhdr_sp *) skb_mac_header(skb))->hsr_sup;
+               hsr_sup_tag =
+                    &((struct hsrv0_ethhdr_sp *)skb_mac_header(skb))->hsr_sup;
        }
 
-       if ((hsrSupTag->HSR_TLV_Type != HSR_TLV_ANNOUNCE) &&
-           (hsrSupTag->HSR_TLV_Type != HSR_TLV_LIFE_CHECK))
+       if (hsr_sup_tag->HSR_TLV_type != HSR_TLV_ANNOUNCE &&
+           hsr_sup_tag->HSR_TLV_type != HSR_TLV_LIFE_CHECK)
                return false;
-       if ((hsrSupTag->HSR_TLV_Length != 12) &&
-                       (hsrSupTag->HSR_TLV_Length !=
-                                       sizeof(struct hsr_sup_payload)))
+       if (hsr_sup_tag->HSR_TLV_length != 12 &&
+           hsr_sup_tag->HSR_TLV_length != sizeof(struct hsr_sup_payload))
                return false;
 
        return true;
 }
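
The two branches above fetch the supervision tag from different offsets because an HSRv1 supervision frame carries the 6-byte HSR tag between the Ethernet header and the supervision tag, while HSRv0 places the supervision tag directly after the Ethernet header. A small userspace sketch of that offset arithmetic, with header sizes assumed from struct ethhdr and struct hsr_tag:

    #include <stdio.h>

    #define ETH_HDR_LEN 14  /* sizeof(struct ethhdr), assumed */
    #define HSR_TAG_LEN 6   /* sizeof(struct hsr_tag), assumed */

    int main(void)
    {
            /* hsrv0_ethhdr_sp: Ethernet header, then supervision tag */
            printf("v0 sup tag offset: %d\n", ETH_HDR_LEN);
            /* hsrv1_ethhdr_sp: Ethernet header, HSR tag, then supervision tag */
            printf("v1 sup tag offset: %d\n", ETH_HDR_LEN + HSR_TAG_LEN);
            return 0;
    }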
 
-
 static struct sk_buff *create_stripped_skb(struct sk_buff *skb_in,
                                           struct hsr_frame_info *frame)
 {
@@ -100,7 +93,7 @@ static struct sk_buff *create_stripped_skb(struct sk_buff *skb_in,
        skb_pull(skb_in, HSR_HLEN);
        skb = __pskb_copy(skb_in, skb_headroom(skb_in) - HSR_HLEN, GFP_ATOMIC);
        skb_push(skb_in, HSR_HLEN);
-       if (skb == NULL)
+       if (!skb)
                return NULL;
 
        skb_reset_mac_header(skb);
@@ -108,7 +101,7 @@ static struct sk_buff *create_stripped_skb(struct sk_buff *skb_in,
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                skb->csum_start -= HSR_HLEN;
 
-       copylen = 2*ETH_ALEN;
+       copylen = 2 * ETH_ALEN;
        if (frame->is_vlan)
                copylen += VLAN_HLEN;
        src = skb_mac_header(skb_in);
@@ -127,9 +120,8 @@ static struct sk_buff *frame_get_stripped_skb(struct hsr_frame_info *frame,
        return skb_clone(frame->skb_std, GFP_ATOMIC);
 }
 
-
 static void hsr_fill_tag(struct sk_buff *skb, struct hsr_frame_info *frame,
-                        struct hsr_port *port, u8 protoVersion)
+                        struct hsr_port *port, u8 proto_version)
 {
        struct hsr_ethhdr *hsr_ethhdr;
        int lane_id;
@@ -144,13 +136,13 @@ static void hsr_fill_tag(struct sk_buff *skb, struct hsr_frame_info *frame,
        if (frame->is_vlan)
                lsdu_size -= 4;
 
-       hsr_ethhdr = (struct hsr_ethhdr *) skb_mac_header(skb);
+       hsr_ethhdr = (struct hsr_ethhdr *)skb_mac_header(skb);
 
        set_hsr_tag_path(&hsr_ethhdr->hsr_tag, lane_id);
        set_hsr_tag_LSDU_size(&hsr_ethhdr->hsr_tag, lsdu_size);
        hsr_ethhdr->hsr_tag.sequence_nr = htons(frame->sequence_nr);
        hsr_ethhdr->hsr_tag.encap_proto = hsr_ethhdr->ethhdr.h_proto;
-       hsr_ethhdr->ethhdr.h_proto = htons(protoVersion ?
+       hsr_ethhdr->ethhdr.h_proto = htons(proto_version ?
                        ETH_P_HSR : ETH_P_PRP);
 }
 
@@ -164,7 +156,7 @@ static struct sk_buff *create_tagged_skb(struct sk_buff *skb_o,
 
        /* Create the new skb with enough headroom to fit the HSR tag */
        skb = __pskb_copy(skb_o, skb_headroom(skb_o) + HSR_HLEN, GFP_ATOMIC);
-       if (skb == NULL)
+       if (!skb)
                return NULL;
        skb_reset_mac_header(skb);
 
@@ -180,7 +172,7 @@ static struct sk_buff *create_tagged_skb(struct sk_buff *skb_o,
        memmove(dst, src, movelen);
        skb_reset_mac_header(skb);
 
-       hsr_fill_tag(skb, frame, port, port->hsr->protVersion);
+       hsr_fill_tag(skb, frame, port, port->hsr->prot_version);
 
        return skb;
 }
@@ -194,7 +186,7 @@ static struct sk_buff *frame_get_tagged_skb(struct hsr_frame_info *frame,
        if (frame->skb_hsr)
                return skb_clone(frame->skb_hsr, GFP_ATOMIC);
 
-       if ((port->type != HSR_PT_SLAVE_A) && (port->type != HSR_PT_SLAVE_B)) {
+       if (port->type != HSR_PT_SLAVE_A && port->type != HSR_PT_SLAVE_B) {
                WARN_ONCE(1, "HSR: Bug: trying to create a tagged frame for a non-ring port");
                return NULL;
        }
@@ -202,7 +194,6 @@ static struct sk_buff *frame_get_tagged_skb(struct hsr_frame_info *frame,
        return create_tagged_skb(frame->skb_std, frame, port);
 }
 
-
 static void hsr_deliver_master(struct sk_buff *skb, struct net_device *dev,
                               struct hsr_node *node_src)
 {
@@ -237,7 +228,6 @@ static int hsr_xmit(struct sk_buff *skb, struct hsr_port *port,
        return dev_queue_xmit(skb);
 }
 
-
 /* Forward the frame through all devices except:
  * - Back through the receiving device
  * - If it's a HSR frame: through a device where it has passed before
@@ -260,11 +250,11 @@ static void hsr_forward_do(struct hsr_frame_info *frame)
                        continue;
 
                /* Don't deliver locally unless we should */
-               if ((port->type == HSR_PT_MASTER) && !frame->is_local_dest)
+               if (port->type == HSR_PT_MASTER && !frame->is_local_dest)
                        continue;
 
                /* Deliver frames directly addressed to us to master only */
-               if ((port->type != HSR_PT_MASTER) && frame->is_local_exclusive)
+               if (port->type != HSR_PT_MASTER && frame->is_local_exclusive)
                        continue;
 
                /* Don't send frame over port where it has been sent before */
@@ -272,7 +262,7 @@ static void hsr_forward_do(struct hsr_frame_info *frame)
                                           frame->sequence_nr))
                        continue;
 
-               if (frame->is_supervision && (port->type == HSR_PT_MASTER)) {
+               if (frame->is_supervision && port->type == HSR_PT_MASTER) {
                        hsr_handle_sup_frame(frame->skb_hsr,
                                             frame->node_src,
                                             frame->port_rcv);
@@ -283,7 +273,7 @@ static void hsr_forward_do(struct hsr_frame_info *frame)
                        skb = frame_get_tagged_skb(frame, port);
                else
                        skb = frame_get_stripped_skb(frame, port);
-               if (skb == NULL) {
+               if (!skb) {
                        /* FIXME: Record the dropped frame? */
                        continue;
                }
@@ -296,7 +286,6 @@ static void hsr_forward_do(struct hsr_frame_info *frame)
        }
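
Condensed, the loop above skips a port whenever the frame has no business there. A hedged restatement of the two locality rules as a standalone predicate (hypothetical helper; the duplicate-suppression check done via hsr_register_frame_out() is deliberately left out):

    /* Hypothetical condensation of hsr_forward_do()'s locality checks. */
    static bool hsr_skip_port(const struct hsr_frame_info *frame,
                              const struct hsr_port *port)
    {
            /* deliver to the master (i.e. locally) only if addressed to us */
            if (port->type == HSR_PT_MASTER && !frame->is_local_dest)
                    return true;
            /* frames exclusively for us never go back out on the ring */
            if (port->type != HSR_PT_MASTER && frame->is_local_exclusive)
                    return true;
            return false;
    }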
 }
 
-
 static void check_local_dest(struct hsr_priv *hsr, struct sk_buff *skb,
                             struct hsr_frame_info *frame)
 {
@@ -307,16 +296,15 @@ static void check_local_dest(struct hsr_priv *hsr, struct sk_buff *skb,
                frame->is_local_exclusive = false;
        }
 
-       if ((skb->pkt_type == PACKET_HOST) ||
-           (skb->pkt_type == PACKET_MULTICAST) ||
-           (skb->pkt_type == PACKET_BROADCAST)) {
+       if (skb->pkt_type == PACKET_HOST ||
+           skb->pkt_type == PACKET_MULTICAST ||
+           skb->pkt_type == PACKET_BROADCAST) {
                frame->is_local_dest = true;
        } else {
                frame->is_local_dest = false;
        }
 }
 
-
 static int hsr_fill_frame_info(struct hsr_frame_info *frame,
                               struct sk_buff *skb, struct hsr_port *port)
 {
@@ -325,18 +313,18 @@ static int hsr_fill_frame_info(struct hsr_frame_info *frame,
 
        frame->is_supervision = is_supervision_frame(port->hsr, skb);
        frame->node_src = hsr_get_node(port, skb, frame->is_supervision);
-       if (frame->node_src == NULL)
+       if (!frame->node_src)
                return -1; /* Unknown node and !is_supervision, or no mem */
 
-       ethhdr = (struct ethhdr *) skb_mac_header(skb);
+       ethhdr = (struct ethhdr *)skb_mac_header(skb);
        frame->is_vlan = false;
        if (ethhdr->h_proto == htons(ETH_P_8021Q)) {
                frame->is_vlan = true;
                /* FIXME: */
                WARN_ONCE(1, "HSR: VLAN not yet supported");
        }
-       if (ethhdr->h_proto == htons(ETH_P_PRP)
-                       || ethhdr->h_proto == htons(ETH_P_HSR)) {
+       if (ethhdr->h_proto == htons(ETH_P_PRP) ||
+           ethhdr->h_proto == htons(ETH_P_HSR)) {
                frame->skb_std = NULL;
                frame->skb_hsr = skb;
                frame->sequence_nr = hsr_get_skb_sequence_nr(skb);
@@ -371,10 +359,17 @@ void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port)
                goto out_drop;
        hsr_register_frame_in(frame.node_src, port, frame.sequence_nr);
        hsr_forward_do(&frame);
+       /* Called for ingress frames as well as for egress from the master
+        * port, so only increment the tx stats for the master port here.
+        */
+       if (port->type == HSR_PT_MASTER) {
+               port->dev->stats.tx_packets++;
+               port->dev->stats.tx_bytes += skb->len;
+       }
 
-       if (frame.skb_hsr != NULL)
+       if (frame.skb_hsr)
                kfree_skb(frame.skb_hsr);
-       if (frame.skb_std != NULL)
+       if (frame.skb_std)
                kfree_skb(frame.skb_std);
        return;
 
index 5c5bc4b6b75f8955cd2388a087a6fbea5ab7aae0..51a69295566cf683d2879493ab3f9857297941a2 100644 (file)
@@ -1,9 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright 2011-2014 Autronica Fire and Security AS
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
  *
  * Author(s):
  *     2011-2014 Arvid Brodin, arvid.brodin@alten.se
index 9af16cb68f76b92bd929d4d941a5e8c9a04d779e..9fa9abd83018a73e54b238343eb00fa00dd6d53b 100644 (file)
@@ -1,9 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright 2011-2014 Autronica Fire and Security AS
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
  *
  * Author(s):
  *     2011-2014 Arvid Brodin, arvid.brodin@alten.se
 #include "hsr_framereg.h"
 #include "hsr_netlink.h"
 
-
-struct hsr_node {
-       struct list_head        mac_list;
-       unsigned char           MacAddressA[ETH_ALEN];
-       unsigned char           MacAddressB[ETH_ALEN];
-       /* Local slave through which AddrB frames are received from this node */
-       enum hsr_port_type      AddrB_port;
-       unsigned long           time_in[HSR_PT_PORTS];
-       bool                    time_in_stale[HSR_PT_PORTS];
-       u16                     seq_out[HSR_PT_PORTS];
-       struct rcu_head         rcu_head;
-};
-
-
 /*     TODO: use hash lists for mac addresses (linux/jhash.h)?    */
 
-
 /* seq_nr_after(a, b) - return true if a is after (higher in sequence than) b,
  * false otherwise.
  */
@@ -47,16 +28,16 @@ static bool seq_nr_after(u16 a, u16 b)
        /* Remove inconsistency where
         * seq_nr_after(a, b) == seq_nr_before(a, b)
         */
-       if ((int) b - a == 32768)
+       if ((int)b - a == 32768)
                return false;
 
-       return (((s16) (b - a)) < 0);
+       return (((s16)(b - a)) < 0);
 }
+
 #define seq_nr_before(a, b)            seq_nr_after((b), (a))
 #define seq_nr_after_or_eq(a, b)       (!seq_nr_before((a), (b)))
 #define seq_nr_before_or_eq(a, b)      (!seq_nr_after((a), (b)))
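
The s16 cast is what makes this comparison wrap-safe: the 16-bit difference is reinterpreted as signed, so "a is after b" holds exactly when a lies less than half the sequence space ahead of b, even across the 65535-to-0 wrap; the explicit check breaks the tie at the half-way point in one direction only. A self-contained userspace rendering of the same trick:

    #include <assert.h>
    #include <stdint.h>

    static int seq_nr_after(uint16_t a, uint16_t b)
    {
            if ((int)b - a == 32768)        /* break the half-way tie */
                    return 0;
            return (int16_t)(b - a) < 0;
    }

    int main(void)
    {
            assert(seq_nr_after(5, 65530));         /* 5 is 11 steps past 65530 */
            assert(!seq_nr_after(65530, 5));
            assert(!seq_nr_after(0, 32768));        /* tie broken: 0 is not after 32768 */
            assert(seq_nr_after(32768, 0));         /* ...but 32768 is after 0 */
            return 0;
    }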
 
-
 bool hsr_addr_is_self(struct hsr_priv *hsr, unsigned char *addr)
 {
        struct hsr_node *node;
@@ -68,9 +49,9 @@ bool hsr_addr_is_self(struct hsr_priv *hsr, unsigned char *addr)
                return false;
        }
 
-       if (ether_addr_equal(addr, node->MacAddressA))
+       if (ether_addr_equal(addr, node->macaddress_A))
                return true;
-       if (ether_addr_equal(addr, node->MacAddressB))
+       if (ether_addr_equal(addr, node->macaddress_B))
                return true;
 
        return false;
@@ -78,20 +59,19 @@ bool hsr_addr_is_self(struct hsr_priv *hsr, unsigned char *addr)
 
 /* Search for mac entry. Caller must hold rcu read lock.
  */
-static struct hsr_node *find_node_by_AddrA(struct list_head *node_db,
-                                          const unsigned char addr[ETH_ALEN])
+static struct hsr_node *find_node_by_addr_A(struct list_head *node_db,
+                                           const unsigned char addr[ETH_ALEN])
 {
        struct hsr_node *node;
 
        list_for_each_entry_rcu(node, node_db, mac_list) {
-               if (ether_addr_equal(node->MacAddressA, addr))
+               if (ether_addr_equal(node->macaddress_A, addr))
                        return node;
        }
 
        return NULL;
 }
 
-
 /* Helper for device init; the self_node_db is used in hsr_rcv() to recognize
  * frames from self that have been looped over the HSR ring.
  */
@@ -105,12 +85,12 @@ int hsr_create_self_node(struct list_head *self_node_db,
        if (!node)
                return -ENOMEM;
 
-       ether_addr_copy(node->MacAddressA, addr_a);
-       ether_addr_copy(node->MacAddressB, addr_b);
+       ether_addr_copy(node->macaddress_A, addr_a);
+       ether_addr_copy(node->macaddress_B, addr_b);
 
        rcu_read_lock();
        oldnode = list_first_or_null_rcu(self_node_db,
-                                               struct hsr_node, mac_list);
+                                        struct hsr_node, mac_list);
        if (oldnode) {
                list_replace_rcu(&oldnode->mac_list, &node->mac_list);
                rcu_read_unlock();
@@ -137,7 +117,7 @@ void hsr_del_node(struct list_head *self_node_db)
        }
 }
 
-/* Allocate an hsr_node and add it to node_db. 'addr' is the node's AddressA;
+/* Allocate an hsr_node and add it to node_db. 'addr' is the node's address_A;
  * seq_out is used to initialize filtering of outgoing duplicate frames
  * originating from the newly added node.
  */
@@ -152,7 +132,7 @@ struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[],
        if (!node)
                return NULL;
 
-       ether_addr_copy(node->MacAddressA, addr);
+       ether_addr_copy(node->macaddress_A, addr);
 
        /* We are only interested in time diffs here, so use current jiffies
         * as initialization. (0 could trigger a spurious ring error warning).
@@ -181,19 +161,19 @@ struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
        if (!skb_mac_header_was_set(skb))
                return NULL;
 
-       ethhdr = (struct ethhdr *) skb_mac_header(skb);
+       ethhdr = (struct ethhdr *)skb_mac_header(skb);
 
        list_for_each_entry_rcu(node, node_db, mac_list) {
-               if (ether_addr_equal(node->MacAddressA, ethhdr->h_source))
+               if (ether_addr_equal(node->macaddress_A, ethhdr->h_source))
                        return node;
-               if (ether_addr_equal(node->MacAddressB, ethhdr->h_source))
+               if (ether_addr_equal(node->macaddress_B, ethhdr->h_source))
                        return node;
        }
 
        /* Any node connected to the HSR device may get an entry created here. */
 
-       if (ethhdr->h_proto == htons(ETH_P_PRP)
-                       || ethhdr->h_proto == htons(ETH_P_HSR)) {
+       if (ethhdr->h_proto == htons(ETH_P_PRP) ||
+           ethhdr->h_proto == htons(ETH_P_HSR)) {
                /* Use the existing sequence_nr from the tag as starting point
                 * for filtering duplicate frames.
                 */
@@ -210,8 +190,8 @@ struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
        return hsr_add_node(node_db, ethhdr->h_source, seq_out);
 }
 
-/* Use the Supervision frame's info about an eventual MacAddressB for merging
- * nodes that has previously had their MacAddressB registered as a separate
+/* Use the Supervision frame's info about a possible macaddress_B for merging
+ * nodes that have previously had their macaddress_B registered as a separate
  * node.
  */
 void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
@@ -223,7 +203,7 @@ void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
        struct list_head *node_db;
        int i;
 
-       ethhdr = (struct ethhdr *) skb_mac_header(skb);
+       ethhdr = (struct ethhdr *)skb_mac_header(skb);
 
        /* Leave the ethernet header. */
        skb_pull(skb, sizeof(struct ethhdr));
@@ -235,14 +215,14 @@ void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
        /* And leave the HSR sup tag. */
        skb_pull(skb, sizeof(struct hsr_sup_tag));
 
-       hsr_sp = (struct hsr_sup_payload *) skb->data;
+       hsr_sp = (struct hsr_sup_payload *)skb->data;
 
-       /* Merge node_curr (registered on MacAddressB) into node_real */
+       /* Merge node_curr (registered on macaddress_B) into node_real */
        node_db = &port_rcv->hsr->node_db;
-       node_real = find_node_by_AddrA(node_db, hsr_sp->MacAddressA);
+       node_real = find_node_by_addr_A(node_db, hsr_sp->macaddress_A);
        if (!node_real)
                /* No frame received from AddrA of this node yet */
-               node_real = hsr_add_node(node_db, hsr_sp->MacAddressA,
+               node_real = hsr_add_node(node_db, hsr_sp->macaddress_A,
                                         HSR_SEQNR_START - 1);
        if (!node_real)
                goto done; /* No mem */
@@ -250,17 +230,18 @@ void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
                /* Node has already been merged */
                goto done;
 
-       ether_addr_copy(node_real->MacAddressB, ethhdr->h_source);
+       ether_addr_copy(node_real->macaddress_B, ethhdr->h_source);
        for (i = 0; i < HSR_PT_PORTS; i++) {
                if (!node_curr->time_in_stale[i] &&
                    time_after(node_curr->time_in[i], node_real->time_in[i])) {
                        node_real->time_in[i] = node_curr->time_in[i];
-                       node_real->time_in_stale[i] = node_curr->time_in_stale[i];
+                       node_real->time_in_stale[i] =
+                                               node_curr->time_in_stale[i];
                }
                if (seq_nr_after(node_curr->seq_out[i], node_real->seq_out[i]))
                        node_real->seq_out[i] = node_curr->seq_out[i];
        }
-       node_real->AddrB_port = port_rcv->type;
+       node_real->addr_B_port = port_rcv->type;
 
        list_del_rcu(&node_curr->mac_list);
        kfree_rcu(node_curr, rcu_head);
@@ -269,11 +250,10 @@ void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
        skb_push(skb, sizeof(struct hsrv1_ethhdr_sp));
 }
 
-
 /* 'skb' is a frame meant for this host, that is to be passed to upper layers.
  *
  * If the frame was sent by a node's B interface, replace the source
- * address with that node's "official" address (MacAddressA) so that upper
+ * address with that node's "official" address (macaddress_A) so that upper
  * layers recognize where it came from.
  */
 void hsr_addr_subst_source(struct hsr_node *node, struct sk_buff *skb)
@@ -283,7 +263,7 @@ void hsr_addr_subst_source(struct hsr_node *node, struct sk_buff *skb)
                return;
        }
 
-       memcpy(&eth_hdr(skb)->h_source, node->MacAddressA, ETH_ALEN);
+       memcpy(&eth_hdr(skb)->h_source, node->macaddress_A, ETH_ALEN);
 }
 
 /* 'skb' is a frame meant for another host.
@@ -308,18 +288,18 @@ void hsr_addr_subst_dest(struct hsr_node *node_src, struct sk_buff *skb,
        if (!is_unicast_ether_addr(eth_hdr(skb)->h_dest))
                return;
 
-       node_dst = find_node_by_AddrA(&port->hsr->node_db, eth_hdr(skb)->h_dest);
+       node_dst = find_node_by_addr_A(&port->hsr->node_db,
+                                      eth_hdr(skb)->h_dest);
        if (!node_dst) {
                WARN_ONCE(1, "%s: Unknown node\n", __func__);
                return;
        }
-       if (port->type != node_dst->AddrB_port)
+       if (port->type != node_dst->addr_B_port)
                return;
 
-       ether_addr_copy(eth_hdr(skb)->h_dest, node_dst->MacAddressB);
+       ether_addr_copy(eth_hdr(skb)->h_dest, node_dst->macaddress_B);
 }
 
-
 void hsr_register_frame_in(struct hsr_node *node, struct hsr_port *port,
                           u16 sequence_nr)
 {
@@ -352,7 +332,6 @@ int hsr_register_frame_out(struct hsr_port *port, struct hsr_node *node,
        return 0;
 }
 
-
 static struct hsr_port *get_late_port(struct hsr_priv *hsr,
                                      struct hsr_node *node)
 {
@@ -373,7 +352,6 @@ static struct hsr_port *get_late_port(struct hsr_priv *hsr,
        return NULL;
 }
 
-
 /* Remove stale sequence_nr records. Called by timer every
  * HSR_LIFE_CHECK_INTERVAL (two seconds or so).
  */
@@ -392,9 +370,9 @@ void hsr_prune_nodes(struct timer_list *t)
                time_b = node->time_in[HSR_PT_SLAVE_B];
 
                /* Check for timestamps old enough to risk wrap-around */
-               if (time_after(jiffies, time_a + MAX_JIFFY_OFFSET/2))
+               if (time_after(jiffies, time_a + MAX_JIFFY_OFFSET / 2))
                        node->time_in_stale[HSR_PT_SLAVE_A] = true;
-               if (time_after(jiffies, time_b + MAX_JIFFY_OFFSET/2))
+               if (time_after(jiffies, time_b + MAX_JIFFY_OFFSET / 2))
                        node->time_in_stale[HSR_PT_SLAVE_B] = true;
 
                /* Get age of newest frame from node.
@@ -409,26 +387,29 @@ void hsr_prune_nodes(struct timer_list *t)
 
                /* Warn of ring error only as long as we get frames at all */
                if (time_is_after_jiffies(timestamp +
-                                       msecs_to_jiffies(1.5*MAX_SLAVE_DIFF))) {
+                               msecs_to_jiffies(1.5 * MAX_SLAVE_DIFF))) {
                        rcu_read_lock();
                        port = get_late_port(hsr, node);
-                       if (port != NULL)
-                               hsr_nl_ringerror(hsr, node->MacAddressA, port);
+                       if (port)
+                               hsr_nl_ringerror(hsr, node->macaddress_A, port);
                        rcu_read_unlock();
                }
 
                /* Prune old entries */
                if (time_is_before_jiffies(timestamp +
-                                       msecs_to_jiffies(HSR_NODE_FORGET_TIME))) {
-                       hsr_nl_nodedown(hsr, node->MacAddressA);
+                               msecs_to_jiffies(HSR_NODE_FORGET_TIME))) {
+                       hsr_nl_nodedown(hsr, node->macaddress_A);
                        list_del_rcu(&node->mac_list);
                        /* Note that we need to free this entry later: */
                        kfree_rcu(node, rcu_head);
                }
        }
        rcu_read_unlock();
-}
 
+       /* Restart timer */
+       mod_timer(&hsr->prune_timer,
+                 jiffies + msecs_to_jiffies(PRUNE_PERIOD));
+}
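
The mod_timer() added at the end makes the prune handler re-arm itself; timer_list callbacks are one-shot, so this is the standard pattern for periodic work. A hedged standalone sketch of the idiom (the owning struct and period name are hypothetical; the value mirrors PRUNE_PERIOD):

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    struct pruner {                         /* hypothetical timer owner */
            struct timer_list timer;
    };

    #define PRUNER_PERIOD_MS 3000           /* mirrors PRUNE_PERIOD */

    static void pruner_fn(struct timer_list *t)
    {
            struct pruner *p = from_timer(p, t, timer);

            /* ... prune stale entries owned by p ... */

            /* timers fire once; re-arm for the next period */
            mod_timer(&p->timer, jiffies + msecs_to_jiffies(PRUNER_PERIOD_MS));
    }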
 
 void *hsr_get_next_node(struct hsr_priv *hsr, void *_pos,
                        unsigned char addr[ETH_ALEN])
@@ -439,20 +420,19 @@ void *hsr_get_next_node(struct hsr_priv *hsr, void *_pos,
                node = list_first_or_null_rcu(&hsr->node_db,
                                              struct hsr_node, mac_list);
                if (node)
-                       ether_addr_copy(addr, node->MacAddressA);
+                       ether_addr_copy(addr, node->macaddress_A);
                return node;
        }
 
        node = _pos;
        list_for_each_entry_continue_rcu(node, &hsr->node_db, mac_list) {
-               ether_addr_copy(addr, node->MacAddressA);
+               ether_addr_copy(addr, node->macaddress_A);
                return node;
        }
 
        return NULL;
 }
 
-
 int hsr_get_node_data(struct hsr_priv *hsr,
                      const unsigned char *addr,
                      unsigned char addr_b[ETH_ALEN],
@@ -466,15 +446,14 @@ int hsr_get_node_data(struct hsr_priv *hsr,
        struct hsr_port *port;
        unsigned long tdiff;
 
-
        rcu_read_lock();
-       node = find_node_by_AddrA(&hsr->node_db, addr);
+       node = find_node_by_addr_A(&hsr->node_db, addr);
        if (!node) {
                rcu_read_unlock();
                return -ENOENT; /* No such entry */
        }
 
-       ether_addr_copy(addr_b, node->MacAddressB);
+       ether_addr_copy(addr_b, node->macaddress_B);
 
        tdiff = jiffies - node->time_in[HSR_PT_SLAVE_A];
        if (node->time_in_stale[HSR_PT_SLAVE_A])
@@ -500,8 +479,8 @@ int hsr_get_node_data(struct hsr_priv *hsr,
        *if1_seq = node->seq_out[HSR_PT_SLAVE_B];
        *if2_seq = node->seq_out[HSR_PT_SLAVE_A];
 
-       if (node->AddrB_port != HSR_PT_NONE) {
-               port = hsr_port_get_hsr(hsr, node->AddrB_port);
+       if (node->addr_B_port != HSR_PT_NONE) {
+               port = hsr_port_get_hsr(hsr, node->addr_B_port);
                *addr_b_ifindex = port->dev->ifindex;
        } else {
                *addr_b_ifindex = -1;
index 531fd3dfcac1efae09302284c15e08b4c80a4d33..a3bdcdab469d5383f77f3d41e77beb33f5860cec 100644 (file)
@@ -1,9 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright 2011-2014 Autronica Fire and Security AS
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
  *
  * Author(s):
  *     2011-2014 Arvid Brodin, arvid.brodin@alten.se
@@ -52,4 +48,16 @@ int hsr_get_node_data(struct hsr_priv *hsr,
                      int *if2_age,
                      u16 *if2_seq);
 
+struct hsr_node {
+       struct list_head        mac_list;
+       unsigned char           macaddress_A[ETH_ALEN];
+       unsigned char           macaddress_B[ETH_ALEN];
+       /* Local slave through which AddrB frames are received from this node */
+       enum hsr_port_type      addr_B_port;
+       unsigned long           time_in[HSR_PT_PORTS];
+       bool                    time_in_stale[HSR_PT_PORTS];
+       u16                     seq_out[HSR_PT_PORTS];
+       struct rcu_head         rcu_head;
+};
+
 #endif /* __HSR_FRAMEREG_H */
index cd37d0011b424824fd113ffd4da59f36c116996a..b9988a662ee1ac14c3ad8b96ea18a2dca92f5254 100644 (file)
@@ -1,9 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright 2011-2014 Autronica Fire and Security AS
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
  *
  * Author(s):
  *     2011-2014 Arvid Brodin, arvid.brodin@alten.se
@@ -19,7 +15,6 @@
 #include "hsr_framereg.h"
 #include "hsr_slave.h"
 
-
 static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event,
                             void *ptr)
 {
@@ -31,12 +26,12 @@ static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event,
 
        dev = netdev_notifier_info_to_dev(ptr);
        port = hsr_port_get_rtnl(dev);
-       if (port == NULL) {
+       if (!port) {
                if (!is_hsr_master(dev))
                        return NOTIFY_DONE;     /* Not an HSR device */
                hsr = netdev_priv(dev);
                port = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
-               if (port == NULL) {
+               if (!port) {
                        /* Resend of notification concerning removed device? */
                        return NOTIFY_DONE;
                }
@@ -63,7 +58,8 @@ static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event,
 
                if (port->type == HSR_PT_SLAVE_A) {
                        ether_addr_copy(master->dev->dev_addr, dev->dev_addr);
-                       call_netdevice_notifiers(NETDEV_CHANGEADDR, master->dev);
+                       call_netdevice_notifiers(NETDEV_CHANGEADDR,
+                                                master->dev);
                }
 
                /* Make sure we recognize frames from ourselves in hsr_rcv() */
@@ -97,7 +93,6 @@ static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event,
        return NOTIFY_DONE;
 }
 
-
 struct hsr_port *hsr_port_get_hsr(struct hsr_priv *hsr, enum hsr_port_type pt)
 {
        struct hsr_port *port;
@@ -112,7 +107,6 @@ static struct notifier_block hsr_nb = {
        .notifier_call = hsr_netdev_notify,     /* Slave event notifications */
 };
 
-
 static int __init hsr_init(void)
 {
        int res;
index 9b9909e89e9e855851b2fa6ab8bed321cdcfafc7..96fac696a1e1a602fdf46c4df74016b7baf50008 100644 (file)
@@ -1,9 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright 2011-2014 Autronica Fire and Security AS
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
  *
  * Author(s):
  *     2011-2014 Arvid Brodin, arvid.brodin@alten.se
@@ -15,7 +11,6 @@
 #include <linux/netdevice.h>
 #include <linux/list.h>
 
-
 /* Time constants as specified in the HSR specification (IEC-62439-3 2010)
  * Table 8.
  * All values in milliseconds.
@@ -24,7 +19,6 @@
 #define HSR_NODE_FORGET_TIME           60000 /* ms */
 #define HSR_ANNOUNCE_INTERVAL            100 /* ms */
 
-
 /* By how much may slave1 and slave2 timestamps of latest received frame from
  * each node differ before we notify of communication problem?
  */
 #define HSR_SEQNR_START                        (USHRT_MAX - 1024)
 #define HSR_SUP_SEQNR_START            (HSR_SEQNR_START / 2)
 
-
 /* How often shall we check for broken ring and remove node entries older than
  * HSR_NODE_FORGET_TIME?
  */
 #define PRUNE_PERIOD                    3000 /* ms */
 
-
 #define HSR_TLV_ANNOUNCE                  22
 #define HSR_TLV_LIFE_CHECK                23
 
-
 /* HSR Tag.
  * As defined in IEC-62439-3:2010, the HSR tag is really { ethertype = 0x88FB,
  * path, LSDU_size, sequence Nr }. But we let eth_header() create { h_dest,
@@ -83,15 +74,14 @@ static inline u16 get_hsr_tag_LSDU_size(struct hsr_tag *ht)
 
 static inline void set_hsr_tag_path(struct hsr_tag *ht, u16 path)
 {
-       ht->path_and_LSDU_size = htons(
-                       (ntohs(ht->path_and_LSDU_size) & 0x0FFF) | (path << 12));
+       ht->path_and_LSDU_size =
+               htons((ntohs(ht->path_and_LSDU_size) & 0x0FFF) | (path << 12));
 }
 
 static inline void set_hsr_tag_LSDU_size(struct hsr_tag *ht, u16 LSDU_size)
 {
-       ht->path_and_LSDU_size = htons(
-                       (ntohs(ht->path_and_LSDU_size) & 0xF000) |
-                       (LSDU_size & 0x0FFF));
+       ht->path_and_LSDU_size = htons((ntohs(ht->path_and_LSDU_size) &
+                                      0xF000) | (LSDU_size & 0x0FFF));
 }
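
Both setters above pack two fields into one big-endian 16-bit word: the 4-bit path in the top nibble and the 12-bit LSDU size below it, which is why each masks out its own bits before OR-ing in the new value. A quick userspace check of the packing:

    #include <arpa/inet.h>
    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint16_t v = 0;

            v = (v & 0x0FFF) | (0xA << 12);         /* path := 0xA */
            v = (v & 0xF000) | (0x123 & 0x0FFF);    /* LSDU_size := 0x123 */

            assert((v >> 12) == 0xA);
            assert((v & 0x0FFF) == 0x123);

            /* the field is __be16 on the wire, hence htons()/ntohs() */
            assert(ntohs(htons(v)) == v);
            return 0;
    }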
 
 struct hsr_ethhdr {
@@ -99,39 +89,38 @@ struct hsr_ethhdr {
        struct hsr_tag  hsr_tag;
 } __packed;
 
-
 /* HSR Supervision Frame data types.
  * Field names as defined in the IEC:2010 standard for HSR.
  */
 struct hsr_sup_tag {
-       __be16          path_and_HSR_Ver;
+       __be16          path_and_HSR_ver;
        __be16          sequence_nr;
-       __u8            HSR_TLV_Type;
-       __u8            HSR_TLV_Length;
+       __u8            HSR_TLV_type;
+       __u8            HSR_TLV_length;
 } __packed;
 
 struct hsr_sup_payload {
-       unsigned char   MacAddressA[ETH_ALEN];
+       unsigned char   macaddress_A[ETH_ALEN];
 } __packed;
 
 static inline u16 get_hsr_stag_path(struct hsr_sup_tag *hst)
 {
-       return get_hsr_tag_path((struct hsr_tag *) hst);
+       return get_hsr_tag_path((struct hsr_tag *)hst);
 }
 
 static inline u16 get_hsr_stag_HSR_ver(struct hsr_sup_tag *hst)
 {
-       return get_hsr_tag_LSDU_size((struct hsr_tag *) hst);
+       return get_hsr_tag_LSDU_size((struct hsr_tag *)hst);
 }
 
 static inline void set_hsr_stag_path(struct hsr_sup_tag *hst, u16 path)
 {
-       set_hsr_tag_path((struct hsr_tag *) hst, path);
+       set_hsr_tag_path((struct hsr_tag *)hst, path);
 }
 
-static inline void set_hsr_stag_HSR_Ver(struct hsr_sup_tag *hst, u16 HSR_Ver)
+static inline void set_hsr_stag_HSR_ver(struct hsr_sup_tag *hst, u16 HSR_ver)
 {
-       set_hsr_tag_LSDU_size((struct hsr_tag *) hst, HSR_Ver);
+       set_hsr_tag_LSDU_size((struct hsr_tag *)hst, HSR_ver);
 }
 
 struct hsrv0_ethhdr_sp {
@@ -145,7 +134,6 @@ struct hsrv1_ethhdr_sp {
        struct hsr_sup_tag      hsr_sup;
 } __packed;
 
-
 enum hsr_port_type {
        HSR_PT_NONE = 0,        /* Must be 0, used by framereg */
        HSR_PT_SLAVE_A,
@@ -171,10 +159,14 @@ struct hsr_priv {
        struct timer_list       prune_timer;
        int announce_count;
        u16 sequence_nr;
-       u16 sup_sequence_nr;                    /* For HSRv1 separate seq_nr for supervision */
-       u8 protVersion;                                 /* Indicate if HSRv0 or HSRv1. */
+       u16 sup_sequence_nr;    /* For HSRv1 separate seq_nr for supervision */
+       u8 prot_version;                /* Indicate if HSRv0 or HSRv1. */
        spinlock_t seqnr_lock;                  /* locking for sequence_nr */
        unsigned char           sup_multicast_addr[ETH_ALEN];
+#ifdef CONFIG_DEBUG_FS
+       struct dentry *node_tbl_root;
+       struct dentry *node_tbl_file;
+#endif
 };
 
 #define hsr_for_each_port(hsr, port) \
@@ -187,8 +179,22 @@ static inline u16 hsr_get_skb_sequence_nr(struct sk_buff *skb)
 {
        struct hsr_ethhdr *hsr_ethhdr;
 
-       hsr_ethhdr = (struct hsr_ethhdr *) skb_mac_header(skb);
+       hsr_ethhdr = (struct hsr_ethhdr *)skb_mac_header(skb);
        return ntohs(hsr_ethhdr->hsr_tag.sequence_nr);
 }
 
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+int hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev);
+void hsr_debugfs_term(struct hsr_priv *priv);
+#else
+static inline int hsr_debugfs_init(struct hsr_priv *priv,
+                                  struct net_device *hsr_dev)
+{
+       return 0;
+}
+
+static inline void hsr_debugfs_term(struct hsr_priv *priv)
+{}
+#endif
+
 #endif /*  __HSR_PRIVATE_H */
index bcc04d3e724f401d4cd1d91d683e9ff128c81432..c2d5a368d6d863a830a496d4cb04bb2951c63b72 100644 (file)
@@ -1,9 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright 2011-2014 Autronica Fire and Security AS
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
  *
  * Author(s):
  *     2011-2014 Arvid Brodin, arvid.brodin@alten.se
@@ -28,7 +24,6 @@ static const struct nla_policy hsr_policy[IFLA_HSR_MAX + 1] = {
        [IFLA_HSR_SEQ_NR]               = { .type = NLA_U16 },
 };
 
-
 /* Here, it seems a netdevice has already been allocated for us, and the
  * hsr_dev_setup routine has been executed. Nice!
  */
@@ -47,12 +42,14 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev,
                netdev_info(dev, "HSR: Slave1 device not specified\n");
                return -EINVAL;
        }
-       link[0] = __dev_get_by_index(src_net, nla_get_u32(data[IFLA_HSR_SLAVE1]));
+       link[0] = __dev_get_by_index(src_net,
+                                    nla_get_u32(data[IFLA_HSR_SLAVE1]));
        if (!data[IFLA_HSR_SLAVE2]) {
                netdev_info(dev, "HSR: Slave2 device not specified\n");
                return -EINVAL;
        }
-       link[1] = __dev_get_by_index(src_net, nla_get_u32(data[IFLA_HSR_SLAVE2]));
+       link[1] = __dev_get_by_index(src_net,
+                                    nla_get_u32(data[IFLA_HSR_SLAVE2]));
 
        if (!link[0] || !link[1])
                return -ENODEV;
@@ -119,8 +116,6 @@ static struct rtnl_link_ops hsr_link_ops __read_mostly = {
        .fill_info      = hsr_fill_info,
 };
 
-
-
 /* attribute policy */
 static const struct nla_policy hsr_genl_policy[HSR_A_MAX + 1] = {
        [HSR_A_NODE_ADDR] = { .len = ETH_ALEN },
@@ -138,8 +133,6 @@ static const struct genl_multicast_group hsr_mcgrps[] = {
        { .name = "hsr-network", },
 };
 
-
-
 /* This is called if, for some node with MAC address addr, we only get frames
  * over one of the slave interfaces. This would indicate an open network ring
  * (i.e. a link has failed somewhere).
@@ -156,7 +149,8 @@ void hsr_nl_ringerror(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN],
        if (!skb)
                goto fail;
 
-       msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0, HSR_C_RING_ERROR);
+       msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0,
+                              HSR_C_RING_ERROR);
        if (!msg_head)
                goto nla_put_failure;
 
@@ -201,7 +195,6 @@ void hsr_nl_nodedown(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN])
        if (!msg_head)
                goto nla_put_failure;
 
-
        res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
        if (res < 0)
                goto nla_put_failure;
@@ -221,7 +214,6 @@ void hsr_nl_nodedown(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN])
        rcu_read_unlock();
 }
 
-
 /* HSR_C_GET_NODE_STATUS lets userspace query the internal HSR node table
  * about the status of a specific node in the network, defined by its MAC
  * address.
@@ -260,15 +252,13 @@ static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
                goto invalid;
 
        hsr_dev = __dev_get_by_index(genl_info_net(info),
-                                       nla_get_u32(info->attrs[HSR_A_IFINDEX]));
+                                    nla_get_u32(info->attrs[HSR_A_IFINDEX]));
        if (!hsr_dev)
                goto invalid;
        if (!is_hsr_master(hsr_dev))
                goto invalid;
 
-
        /* Send reply */
-
        skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!skb_out) {
                res = -ENOMEM;
@@ -276,8 +266,8 @@ static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
        }
 
        msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
-                               info->snd_seq, &hsr_genl_family, 0,
-                               HSR_C_SET_NODE_STATUS);
+                              info->snd_seq, &hsr_genl_family, 0,
+                              HSR_C_SET_NODE_STATUS);
        if (!msg_head) {
                res = -ENOMEM;
                goto nla_put_failure;
@@ -289,28 +279,30 @@ static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
 
        hsr = netdev_priv(hsr_dev);
        res = hsr_get_node_data(hsr,
-                       (unsigned char *) nla_data(info->attrs[HSR_A_NODE_ADDR]),
-                       hsr_node_addr_b,
-                       &addr_b_ifindex,
-                       &hsr_node_if1_age,
-                       &hsr_node_if1_seq,
-                       &hsr_node_if2_age,
-                       &hsr_node_if2_seq);
+                               (unsigned char *)
+                               nla_data(info->attrs[HSR_A_NODE_ADDR]),
+                                        hsr_node_addr_b,
+                                        &addr_b_ifindex,
+                                        &hsr_node_if1_age,
+                                        &hsr_node_if1_seq,
+                                        &hsr_node_if2_age,
+                                        &hsr_node_if2_seq);
        if (res < 0)
                goto nla_put_failure;
 
        res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN,
-                                       nla_data(info->attrs[HSR_A_NODE_ADDR]));
+                     nla_data(info->attrs[HSR_A_NODE_ADDR]));
        if (res < 0)
                goto nla_put_failure;
 
        if (addr_b_ifindex > -1) {
                res = nla_put(skb_out, HSR_A_NODE_ADDR_B, ETH_ALEN,
-                                                               hsr_node_addr_b);
+                             hsr_node_addr_b);
                if (res < 0)
                        goto nla_put_failure;
 
-               res = nla_put_u32(skb_out, HSR_A_ADDR_B_IFINDEX, addr_b_ifindex);
+               res = nla_put_u32(skb_out, HSR_A_ADDR_B_IFINDEX,
+                                 addr_b_ifindex);
                if (res < 0)
                        goto nla_put_failure;
        }
@@ -392,9 +384,7 @@ static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
        if (!is_hsr_master(hsr_dev))
                goto invalid;
 
-
        /* Send reply */
-
        skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!skb_out) {
                res = -ENOMEM;
@@ -402,8 +392,8 @@ static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
        }
 
        msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
-                               info->snd_seq, &hsr_genl_family, 0,
-                               HSR_C_SET_NODE_LIST);
+                              info->snd_seq, &hsr_genl_family, 0,
+                              HSR_C_SET_NODE_LIST);
        if (!msg_head) {
                res = -ENOMEM;
                goto nla_put_failure;
@@ -444,7 +434,6 @@ static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
        return res;
 }
 
-
 static const struct genl_ops hsr_ops[] = {
        {
                .cmd = HSR_C_GET_NODE_STATUS,
index 3f6b95b5b6b8841b045493468ccce641ea55d258..1121bb192a18e8be5a15320907327b427f3b4c29 100644 (file)
@@ -1,9 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright 2011-2014 Autronica Fire and Security AS
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
  *
  * Author(s):
  *     2011-2014 Arvid Brodin, arvid.brodin@alten.se
index 56080da4aa77ef581f4a3c23b4179d72e511980b..88b6705ded837165cd06544d98ea4fe513747f66 100644 (file)
@@ -1,9 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright 2011-2014 Autronica Fire and Security AS
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
  *
  * Author(s):
  *     2011-2014 Arvid Brodin, arvid.brodin@alten.se
@@ -18,7 +14,6 @@
 #include "hsr_forward.h"
 #include "hsr_framereg.h"
 
-
 static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb)
 {
        struct sk_buff *skb = *pskb;
@@ -61,12 +56,11 @@ bool hsr_port_exists(const struct net_device *dev)
        return rcu_access_pointer(dev->rx_handler) == hsr_handle_frame;
 }
 
-
 static int hsr_check_dev_ok(struct net_device *dev)
 {
        /* Don't allow HSR on non-ethernet-like devices */
-       if ((dev->flags & IFF_LOOPBACK) || (dev->type != ARPHRD_ETHER) ||
-           (dev->addr_len != ETH_ALEN)) {
+       if ((dev->flags & IFF_LOOPBACK) || dev->type != ARPHRD_ETHER ||
+           dev->addr_len != ETH_ALEN) {
                netdev_info(dev, "Cannot use loopback or non-ethernet device as HSR slave.\n");
                return -EINVAL;
        }
@@ -99,7 +93,6 @@ static int hsr_check_dev_ok(struct net_device *dev)
        return 0;
 }
 
-
 /* Setup device to be added to the HSR bridge. */
 static int hsr_portdev_setup(struct net_device *dev, struct hsr_port *port)
 {
@@ -143,11 +136,11 @@ int hsr_add_port(struct hsr_priv *hsr, struct net_device *dev,
        }
 
        port = hsr_port_get_hsr(hsr, type);
-       if (port != NULL)
+       if (port)
                return -EBUSY;  /* This port already exists */
 
        port = kzalloc(sizeof(*port), GFP_KERNEL);
-       if (port == NULL)
+       if (!port)
                return -ENOMEM;
 
        if (type != HSR_PT_MASTER) {
@@ -184,7 +177,7 @@ void hsr_del_port(struct hsr_port *port)
        list_del_rcu(&port->port_list);
 
        if (port != master) {
-               if (master != NULL) {
+               if (master) {
                        netdev_update_features(master->dev);
                        dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));
                }
index 3ccfbf71c92ef608f9ed5b65ce3e490c4d7abda7..64b54952959207809ffa7632c989f49c88c73143 100644 (file)
@@ -1,11 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright 2011-2014 Autronica Fire and Security AS
  *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * Author(s):
  *     2011-2014 Arvid Brodin, arvid.brodin@alten.se
  */
 
index 7f3a984ad618580ae28501c3fe3dd3fa915a66a2..08a8430f5647de15da425a81d900e8c76a037bf1 100644 (file)
@@ -160,7 +160,7 @@ void inet_sock_destruct(struct sock *sk)
        WARN_ON(sk->sk_forward_alloc);
 
        kfree(rcu_dereference_protected(inet->inet_opt, 1));
-       dst_release(rcu_dereference_check(sk->sk_dst_cache, 1));
+       dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1));
        dst_release(sk->sk_rx_dst);
        sk_refcnt_debug_dec(sk);
 }
index ffbe24397dbef7d5dee04b13760144e1b72957c6..d4b63f94f7be27afdc595161514ac8a60512f411 100644 (file)
@@ -307,7 +307,7 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb)
                        .flowi4_mark = vmark ? skb->mark : 0,
                };
                if (!fib_lookup(net, &fl4, &res, 0))
-                       return FIB_RES_PREFSRC(net, res);
+                       return fib_result_prefsrc(net, &res);
        } else {
                scope = RT_SCOPE_LINK;
        }
@@ -390,7 +390,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
 
        dev_match = fib_info_nh_uses_dev(res.fi, dev);
        if (dev_match) {
-               ret = FIB_RES_NH(res).fib_nh_scope >= RT_SCOPE_HOST;
+               ret = FIB_RES_NHC(res)->nhc_scope >= RT_SCOPE_HOST;
                return ret;
        }
        if (no_addr)
@@ -402,7 +402,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
        ret = 0;
        if (fib_lookup(net, &fl4, &res, FIB_LOOKUP_IGNORE_LINKSTATE) == 0) {
                if (res.type == RTN_UNICAST)
-                       ret = FIB_RES_NH(res).fib_nh_scope >= RT_SCOPE_HOST;
+                       ret = FIB_RES_NHC(res)->nhc_scope >= RT_SCOPE_HOST;
        }
        return ret;
 
@@ -558,7 +558,8 @@ static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt,
        if (rt->rt_gateway.sa_family == AF_INET && addr) {
                unsigned int addr_type;
 
-               cfg->fc_gw = addr;
+               cfg->fc_gw4 = addr;
+               cfg->fc_gw_family = AF_INET;
                addr_type = inet_addr_type_table(net, addr, cfg->fc_table);
                if (rt->rt_flags & RTF_GATEWAY &&
                    addr_type == RTN_UNICAST)
@@ -568,7 +569,7 @@ static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt,
        if (cmd == SIOCDELRT)
                return 0;
 
-       if (rt->rt_flags & RTF_GATEWAY && !cfg->fc_gw)
+       if (rt->rt_flags & RTF_GATEWAY && !cfg->fc_gw_family)
                return -EINVAL;
 
        if (cfg->fc_scope == RT_SCOPE_NOWHERE)
@@ -664,10 +665,55 @@ const struct nla_policy rtm_ipv4_policy[RTA_MAX + 1] = {
        [RTA_DPORT]             = { .type = NLA_U16 },
 };
 
+int fib_gw_from_via(struct fib_config *cfg, struct nlattr *nla,
+                   struct netlink_ext_ack *extack)
+{
+       struct rtvia *via;
+       int alen;
+
+       if (nla_len(nla) < offsetof(struct rtvia, rtvia_addr)) {
+               NL_SET_ERR_MSG(extack, "Invalid attribute length for RTA_VIA");
+               return -EINVAL;
+       }
+
+       via = nla_data(nla);
+       alen = nla_len(nla) - offsetof(struct rtvia, rtvia_addr);
+
+       switch (via->rtvia_family) {
+       case AF_INET:
+               if (alen != sizeof(__be32)) {
+                       NL_SET_ERR_MSG(extack, "Invalid IPv4 address in RTA_VIA");
+                       return -EINVAL;
+               }
+               cfg->fc_gw_family = AF_INET;
+               cfg->fc_gw4 = *((__be32 *)via->rtvia_addr);
+               break;
+       case AF_INET6:
+#ifdef CONFIG_IPV6
+               if (alen != sizeof(struct in6_addr)) {
+                       NL_SET_ERR_MSG(extack, "Invalid IPv6 address in RTA_VIA");
+                       return -EINVAL;
+               }
+               cfg->fc_gw_family = AF_INET6;
+               cfg->fc_gw6 = *((struct in6_addr *)via->rtvia_addr);
+#else
+               NL_SET_ERR_MSG(extack, "IPv6 support not enabled in kernel");
+               return -EINVAL;
+#endif
+               break;
+       default:
+               NL_SET_ERR_MSG(extack, "Unsupported address family in RTA_VIA");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
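The new fib_gw_from_via() parses the RTA_VIA attribute that MPLS routes already use: a struct rtvia header carrying only an address family, followed by the raw gateway address bytes. As a rough illustration of the payload it expects (userspace sketch, not kernel code; the helper name pack_rta_via_inet6 is made up):

    #include <stddef.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <linux/rtnetlink.h>        /* struct rtvia */

    /* fill an RTA_VIA payload for an IPv6 gateway; buf must have room for
     * the 2-byte family plus the address. The return value is the
     * attribute length the kernel later reads back via nla_len().
     */
    static size_t pack_rta_via_inet6(void *buf, const struct in6_addr *gw)
    {
            struct rtvia *via = buf;

            via->rtvia_family = AF_INET6;
            memcpy(via->rtvia_addr, gw, sizeof(*gw));
            return offsetof(struct rtvia, rtvia_addr) + sizeof(*gw);
    }

This is what an iproute2 invocation along the lines of "ip route add 172.16.1.0/24 via inet6 2001:db8::1 dev eth0" would emit, assuming an iproute2 release with matching support.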
 static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
                             struct nlmsghdr *nlh, struct fib_config *cfg,
                             struct netlink_ext_ack *extack)
 {
+       bool has_gw = false, has_via = false;
        struct nlattr *attr;
        int err, remaining;
        struct rtmsg *rtm;
@@ -708,12 +754,17 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
                        cfg->fc_oif = nla_get_u32(attr);
                        break;
                case RTA_GATEWAY:
-                       cfg->fc_gw = nla_get_be32(attr);
+                       has_gw = true;
+                       cfg->fc_gw4 = nla_get_be32(attr);
+                       if (cfg->fc_gw4)
+                               cfg->fc_gw_family = AF_INET;
                        break;
                case RTA_VIA:
-                       NL_SET_ERR_MSG(extack, "IPv4 does not support RTA_VIA attribute");
-                       err = -EINVAL;
-                       goto errout;
+                       has_via = true;
+                       err = fib_gw_from_via(cfg, attr, extack);
+                       if (err)
+                               goto errout;
+                       break;
                case RTA_PRIORITY:
                        cfg->fc_priority = nla_get_u32(attr);
                        break;
@@ -752,6 +803,12 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
                }
        }
 
+       if (has_gw && has_via) {
+               NL_SET_ERR_MSG(extack,
+                              "Nexthop configuration can not contain both GATEWAY and VIA");
+               err = -EINVAL;
+               goto errout;
+       }
+
        return 0;
 errout:
        return err;
index e6ff282bb7f42c2a941ae6c3f2785ca989150ad1..7945f0534db72ecda20ab7f77ae499a0270d3cd1 100644 (file)
@@ -45,6 +45,7 @@ static inline void fib_result_assign(struct fib_result *res,
 {
        /* we used to play games with refcounts, but we now use RCU */
        res->fi = fi;
+       res->nhc = fib_info_nhc(fi, 0);
 }
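fib_result_assign() now also caches the common part of the first nexthop so fib_result users can stay address-family agnostic. fib_info_nhc() is, roughly, just an accessor for the embedded member (sketch; the exact definition lives in include/net/ip_fib.h):

    static inline struct fib_nh_common *fib_info_nhc(struct fib_info *fi, int nhsel)
    {
            return &fi->fib_nh[nhsel].nh_common;
    }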
 
 struct fib_prop {
index df777af7e278edabed652bc92ae6dce7169cfbbf..779d2be2b1356d3e61d47a3b544fb40464b6fdb3 100644 (file)
 #include <net/tcp.h>
 #include <net/sock.h>
 #include <net/ip_fib.h>
+#include <net/ip6_fib.h>
 #include <net/netlink.h>
 #include <net/nexthop.h>
 #include <net/lwtunnel.h>
 #include <net/fib_notifier.h>
+#include <net/addrconf.h>
 
 #include "fib_lookup.h"
 
@@ -275,7 +277,7 @@ static inline int nh_comp(const struct fib_info *fi, const struct fib_info *ofi)
 
        for_nexthops(fi) {
                if (nh->fib_nh_oif != onh->fib_nh_oif ||
-                   nh->fib_nh_gw4 != onh->fib_nh_gw4 ||
+                   nh->fib_nh_gw_family != onh->fib_nh_gw_family ||
                    nh->fib_nh_scope != onh->fib_nh_scope ||
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
                    nh->fib_nh_weight != onh->fib_nh_weight ||
@@ -286,6 +288,15 @@ static inline int nh_comp(const struct fib_info *fi, const struct fib_info *ofi)
                    lwtunnel_cmp_encap(nh->fib_nh_lws, onh->fib_nh_lws) ||
                    ((nh->fib_nh_flags ^ onh->fib_nh_flags) & ~RTNH_COMPARE_MASK))
                        return -1;
+
+               if (nh->fib_nh_gw_family == AF_INET &&
+                   nh->fib_nh_gw4 != onh->fib_nh_gw4)
+                       return -1;
+
+               if (nh->fib_nh_gw_family == AF_INET6 &&
+                   ipv6_addr_cmp(&nh->fib_nh_gw6, &onh->fib_nh_gw6))
+                       return -1;
+
                onh++;
        } endfor_nexthops(fi);
        return 0;
@@ -446,10 +457,18 @@ static int fib_detect_death(struct fib_info *fi, int order,
                            struct fib_info **last_resort, int *last_idx,
                            int dflt)
 {
+       const struct fib_nh_common *nhc = fib_info_nhc(fi, 0);
        struct neighbour *n;
        int state = NUD_NONE;
 
-       n = neigh_lookup(&arp_tbl, &fi->fib_nh[0].fib_nh_gw4, fi->fib_dev);
+       if (likely(nhc->nhc_gw_family == AF_INET))
+               n = neigh_lookup(&arp_tbl, &nhc->nhc_gw.ipv4, nhc->nhc_dev);
+       else if (nhc->nhc_gw_family == AF_INET6)
+               n = neigh_lookup(ipv6_stub->nd_tbl, &nhc->nhc_gw.ipv6,
+                                nhc->nhc_dev);
+       else
+               n = NULL;
+
        if (n) {
                state = n->nud_state;
                neigh_release(n);
@@ -510,10 +529,12 @@ int fib_nh_init(struct net *net, struct fib_nh *nh,
                goto init_failure;
 
        nh->fib_nh_oif = cfg->fc_oif;
-       if (cfg->fc_gw) {
-               nh->fib_nh_gw4 = cfg->fc_gw;
-               nh->fib_nh_has_gw = 1;
-       }
+       nh->fib_nh_gw_family = cfg->fc_gw_family;
+       if (cfg->fc_gw_family == AF_INET)
+               nh->fib_nh_gw4 = cfg->fc_gw4;
+       else if (cfg->fc_gw_family == AF_INET6)
+               nh->fib_nh_gw6 = cfg->fc_gw6;
+
        nh->fib_nh_flags = cfg->fc_flags;
 
 #ifdef CONFIG_IP_ROUTE_CLASSID
@@ -585,11 +606,24 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
 
                attrlen = rtnh_attrlen(rtnh);
                if (attrlen > 0) {
-                       struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
+                       struct nlattr *nla, *nlav, *attrs = rtnh_attrs(rtnh);
 
                        nla = nla_find(attrs, attrlen, RTA_GATEWAY);
-                       if (nla)
-                               fib_cfg.fc_gw = nla_get_in_addr(nla);
+                       nlav = nla_find(attrs, attrlen, RTA_VIA);
+                       if (nla && nlav) {
+                               NL_SET_ERR_MSG(extack,
+                                              "Nexthop configuration can not contain both GATEWAY and VIA");
+                               return -EINVAL;
+                       }
+                       if (nla) {
+                               fib_cfg.fc_gw4 = nla_get_in_addr(nla);
+                               if (fib_cfg.fc_gw4)
+                                       fib_cfg.fc_gw_family = AF_INET;
+                       } else if (nlav) {
+                               ret = fib_gw_from_via(&fib_cfg, nlav, extack);
+                               if (ret)
+                                       goto errout;
+                       }
 
                        nla = nla_find(attrs, attrlen, RTA_FLOW);
                        if (nla)
@@ -615,10 +649,16 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
                               "Nexthop device index does not match RTA_OIF");
                goto errout;
        }
-       if (cfg->fc_gw && fi->fib_nh->fib_nh_gw4 != cfg->fc_gw) {
-               NL_SET_ERR_MSG(extack,
-                              "Nexthop gateway does not match RTA_GATEWAY");
-               goto errout;
+       if (cfg->fc_gw_family) {
+               if (cfg->fc_gw_family != fi->fib_nh->fib_nh_gw_family ||
+                   (cfg->fc_gw_family == AF_INET &&
+                    fi->fib_nh->fib_nh_gw4 != cfg->fc_gw4) ||
+                   (cfg->fc_gw_family == AF_INET6 &&
+                    ipv6_addr_cmp(&fi->fib_nh->fib_nh_gw6, &cfg->fc_gw6))) {
+                       NL_SET_ERR_MSG(extack,
+                                      "Nexthop gateway does not match RTA_GATEWAY or RTA_VIA");
+                       goto errout;
+               }
        }
 #ifdef CONFIG_IP_ROUTE_CLASSID
        if (cfg->fc_flow && fi->fib_nh->nh_tclassid != cfg->fc_flow) {
@@ -718,7 +758,7 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi,
        if (cfg->fc_priority && cfg->fc_priority != fi->fib_priority)
                return 1;
 
-       if (cfg->fc_oif || cfg->fc_gw) {
+       if (cfg->fc_oif || cfg->fc_gw_family) {
                if (cfg->fc_encap) {
                        if (fib_encap_match(cfg->fc_encap_type, cfg->fc_encap,
                                            fi->fib_nh, cfg, extack))
@@ -729,10 +769,20 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi,
                    cfg->fc_flow != fi->fib_nh->nh_tclassid)
                        return 1;
 #endif
-               if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->fib_nh_oif) &&
-                   (!cfg->fc_gw  || cfg->fc_gw == fi->fib_nh->fib_nh_gw4))
-                       return 0;
-               return 1;
+               if ((cfg->fc_oif && cfg->fc_oif != fi->fib_nh->fib_nh_oif) ||
+                   (cfg->fc_gw_family &&
+                    cfg->fc_gw_family != fi->fib_nh->fib_nh_gw_family))
+                       return 1;
+
+               if (cfg->fc_gw_family == AF_INET &&
+                   cfg->fc_gw4 != fi->fib_nh->fib_nh_gw4)
+                       return 1;
+
+               if (cfg->fc_gw_family == AF_INET6 &&
+                   ipv6_addr_cmp(&cfg->fc_gw6, &fi->fib_nh->fib_nh_gw6))
+                       return 1;
+
+               return 0;
        }
 
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
@@ -753,11 +803,43 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi,
 
                attrlen = rtnh_attrlen(rtnh);
                if (attrlen > 0) {
-                       struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
+                       struct nlattr *nla, *nlav, *attrs = rtnh_attrs(rtnh);
 
                        nla = nla_find(attrs, attrlen, RTA_GATEWAY);
-                       if (nla && nla_get_in_addr(nla) != nh->fib_nh_gw4)
-                               return 1;
+                       nlav = nla_find(attrs, attrlen, RTA_VIA);
+                       if (nla && nlav) {
+                               NL_SET_ERR_MSG(extack,
+                                              "Nexthop configuration can not contain both GATEWAY and VIA");
+                               return -EINVAL;
+                       }
+
+                       if (nla) {
+                               if (nh->fib_nh_gw_family != AF_INET ||
+                                   nla_get_in_addr(nla) != nh->fib_nh_gw4)
+                                       return 1;
+                       } else if (nlav) {
+                               struct fib_config cfg2;
+                               int err;
+
+                               err = fib_gw_from_via(&cfg2, nlav, extack);
+                               if (err)
+                                       return err;
+
+                               switch (nh->fib_nh_gw_family) {
+                               case AF_INET:
+                                       if (cfg2.fc_gw_family != AF_INET ||
+                                           cfg2.fc_gw4 != nh->fib_nh_gw4)
+                                               return 1;
+                                       break;
+                               case AF_INET6:
+                                       if (cfg2.fc_gw_family != AF_INET6 ||
+                                           ipv6_addr_cmp(&cfg2.fc_gw6,
+                                                         &nh->fib_nh_gw6))
+                                               return 1;
+                                       break;
+                               }
+                       }
+
 #ifdef CONFIG_IP_ROUTE_CLASSID
                        nla = nla_find(attrs, attrlen, RTA_FLOW);
                        if (nla && nla_get_u32(nla) != nh->nh_tclassid)
@@ -811,6 +893,30 @@ bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi)
        return true;
 }
 
+static int fib_check_nh_v6_gw(struct net *net, struct fib_nh *nh,
+                             u32 table, struct netlink_ext_ack *extack)
+{
+       struct fib6_config cfg = {
+               .fc_table = table,
+               .fc_flags = nh->fib_nh_flags | RTF_GATEWAY,
+               .fc_ifindex = nh->fib_nh_oif,
+               .fc_gateway = nh->fib_nh_gw6,
+       };
+       struct fib6_nh fib6_nh = {};
+       int err;
+
+       err = ipv6_stub->fib6_nh_init(net, &fib6_nh, &cfg, GFP_KERNEL, extack);
+       if (!err) {
+               nh->fib_nh_dev = fib6_nh.fib_nh_dev;
+               dev_hold(nh->fib_nh_dev);
+               nh->fib_nh_oif = nh->fib_nh_dev->ifindex;
+               nh->fib_nh_scope = RT_SCOPE_LINK;
+
+               ipv6_stub->fib6_nh_release(&fib6_nh);
+       }
+
+       return err;
+}
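Validating an IPv6 gateway on an IPv4 route is delegated to the IPv6 side through the ipv6_stub ops table, which avoids a hard module dependency. Inferred from the call sites above, the two ops used here have roughly these signatures (excerpt only; the real struct in net/addrconf.h carries many more ops):

    int  (*fib6_nh_init)(struct net *net, struct fib6_nh *fib6_nh,
                         struct fib6_config *cfg, gfp_t gfp_flags,
                         struct netlink_ext_ack *extack);
    void (*fib6_nh_release)(struct fib6_nh *fib6_nh);

Note the device reference is taken before fib6_nh_release() drops the temporary fib6_nh state.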
 
 /*
  * Picture
@@ -855,134 +961,152 @@ bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi)
  *                                     |
  *                                     |-> {local prefix} (terminal node)
  */
-static int fib_check_nh(struct fib_config *cfg, struct fib_nh *nh,
-                       struct netlink_ext_ack *extack)
+static int fib_check_nh_v4_gw(struct net *net, struct fib_nh *nh, u32 table,
+                             u8 scope, struct netlink_ext_ack *extack)
 {
-       int err = 0;
-       struct net *net;
        struct net_device *dev;
+       struct fib_result res;
+       int err;
 
-       net = cfg->fc_nlinfo.nl_net;
-       if (nh->fib_nh_gw4) {
-               struct fib_result res;
-
-               if (nh->fib_nh_flags & RTNH_F_ONLINK) {
-                       unsigned int addr_type;
+       if (nh->fib_nh_flags & RTNH_F_ONLINK) {
+               unsigned int addr_type;
 
-                       if (cfg->fc_scope >= RT_SCOPE_LINK) {
-                               NL_SET_ERR_MSG(extack,
-                                              "Nexthop has invalid scope");
-                               return -EINVAL;
-                       }
-                       dev = __dev_get_by_index(net, nh->fib_nh_oif);
-                       if (!dev) {
-                               NL_SET_ERR_MSG(extack, "Nexthop device required for onlink");
-                               return -ENODEV;
-                       }
-                       if (!(dev->flags & IFF_UP)) {
-                               NL_SET_ERR_MSG(extack,
-                                              "Nexthop device is not up");
-                               return -ENETDOWN;
-                       }
-                       addr_type = inet_addr_type_dev_table(net, dev,
-                                                            nh->fib_nh_gw4);
-                       if (addr_type != RTN_UNICAST) {
-                               NL_SET_ERR_MSG(extack,
-                                              "Nexthop has invalid gateway");
-                               return -EINVAL;
-                       }
-                       if (!netif_carrier_ok(dev))
-                               nh->fib_nh_flags |= RTNH_F_LINKDOWN;
-                       nh->fib_nh_dev = dev;
-                       dev_hold(dev);
-                       nh->fib_nh_scope = RT_SCOPE_LINK;
-                       return 0;
+               if (scope >= RT_SCOPE_LINK) {
+                       NL_SET_ERR_MSG(extack, "Nexthop has invalid scope");
+                       return -EINVAL;
                }
-               rcu_read_lock();
-               {
-                       struct fib_table *tbl = NULL;
-                       struct flowi4 fl4 = {
-                               .daddr = nh->fib_nh_gw4,
-                               .flowi4_scope = cfg->fc_scope + 1,
-                               .flowi4_oif = nh->fib_nh_oif,
-                               .flowi4_iif = LOOPBACK_IFINDEX,
-                       };
-
-                       /* It is not necessary, but requires a bit of thinking */
-                       if (fl4.flowi4_scope < RT_SCOPE_LINK)
-                               fl4.flowi4_scope = RT_SCOPE_LINK;
-
-                       if (cfg->fc_table)
-                               tbl = fib_get_table(net, cfg->fc_table);
-
-                       if (tbl)
-                               err = fib_table_lookup(tbl, &fl4, &res,
-                                                      FIB_LOOKUP_IGNORE_LINKSTATE |
-                                                      FIB_LOOKUP_NOREF);
-
-                       /* on error or if no table given do full lookup. This
-                        * is needed for example when nexthops are in the local
-                        * table rather than the given table
-                        */
-                       if (!tbl || err) {
-                               err = fib_lookup(net, &fl4, &res,
-                                                FIB_LOOKUP_IGNORE_LINKSTATE);
-                       }
-
-                       if (err) {
-                               NL_SET_ERR_MSG(extack,
-                                              "Nexthop has invalid gateway");
-                               rcu_read_unlock();
-                               return err;
-                       }
+               dev = __dev_get_by_index(net, nh->fib_nh_oif);
+               if (!dev) {
+                       NL_SET_ERR_MSG(extack, "Nexthop device required for onlink");
+                       return -ENODEV;
                }
-               err = -EINVAL;
-               if (res.type != RTN_UNICAST && res.type != RTN_LOCAL) {
-                       NL_SET_ERR_MSG(extack, "Nexthop has invalid gateway");
-                       goto out;
+               if (!(dev->flags & IFF_UP)) {
+                       NL_SET_ERR_MSG(extack, "Nexthop device is not up");
+                       return -ENETDOWN;
                }
-               nh->fib_nh_scope = res.scope;
-               nh->fib_nh_oif = FIB_RES_OIF(res);
-               nh->fib_nh_dev = dev = FIB_RES_DEV(res);
-               if (!dev) {
-                       NL_SET_ERR_MSG(extack,
-                                      "No egress device for nexthop gateway");
-                       goto out;
+               addr_type = inet_addr_type_dev_table(net, dev, nh->fib_nh_gw4);
+               if (addr_type != RTN_UNICAST) {
+                       NL_SET_ERR_MSG(extack, "Nexthop has invalid gateway");
+                       return -EINVAL;
                }
-               dev_hold(dev);
                if (!netif_carrier_ok(dev))
                        nh->fib_nh_flags |= RTNH_F_LINKDOWN;
-               err = (dev->flags & IFF_UP) ? 0 : -ENETDOWN;
-       } else {
-               struct in_device *in_dev;
-
-               if (nh->fib_nh_flags & (RTNH_F_PERVASIVE | RTNH_F_ONLINK)) {
-                       NL_SET_ERR_MSG(extack,
-                                      "Invalid flags for nexthop - PERVASIVE and ONLINK can not be set");
-                       return -EINVAL;
+               nh->fib_nh_dev = dev;
+               dev_hold(dev);
+               nh->fib_nh_scope = RT_SCOPE_LINK;
+               return 0;
+       }
+       rcu_read_lock();
+       {
+               struct fib_table *tbl = NULL;
+               struct flowi4 fl4 = {
+                       .daddr = nh->fib_nh_gw4,
+                       .flowi4_scope = scope + 1,
+                       .flowi4_oif = nh->fib_nh_oif,
+                       .flowi4_iif = LOOPBACK_IFINDEX,
+               };
+
+               /* It is not necessary, but requires a bit of thinking */
+               if (fl4.flowi4_scope < RT_SCOPE_LINK)
+                       fl4.flowi4_scope = RT_SCOPE_LINK;
+
+               if (table)
+                       tbl = fib_get_table(net, table);
+
+               if (tbl)
+                       err = fib_table_lookup(tbl, &fl4, &res,
+                                              FIB_LOOKUP_IGNORE_LINKSTATE |
+                                              FIB_LOOKUP_NOREF);
+
+               /* on error or if no table given do full lookup. This
+                * is needed for example when nexthops are in the local
+                * table rather than the given table
+                */
+               if (!tbl || err) {
+                       err = fib_lookup(net, &fl4, &res,
+                                        FIB_LOOKUP_IGNORE_LINKSTATE);
                }
-               rcu_read_lock();
-               err = -ENODEV;
-               in_dev = inetdev_by_index(net, nh->fib_nh_oif);
-               if (!in_dev)
-                       goto out;
-               err = -ENETDOWN;
-               if (!(in_dev->dev->flags & IFF_UP)) {
-                       NL_SET_ERR_MSG(extack, "Device for nexthop is not up");
+
+               if (err) {
+                       NL_SET_ERR_MSG(extack, "Nexthop has invalid gateway");
                        goto out;
                }
-               nh->fib_nh_dev = in_dev->dev;
-               dev_hold(nh->fib_nh_dev);
-               nh->fib_nh_scope = RT_SCOPE_HOST;
-               if (!netif_carrier_ok(nh->fib_nh_dev))
-                       nh->fib_nh_flags |= RTNH_F_LINKDOWN;
-               err = 0;
        }
+
+       err = -EINVAL;
+       if (res.type != RTN_UNICAST && res.type != RTN_LOCAL) {
+               NL_SET_ERR_MSG(extack, "Nexthop has invalid gateway");
+               goto out;
+       }
+       nh->fib_nh_scope = res.scope;
+       nh->fib_nh_oif = FIB_RES_OIF(res);
+       nh->fib_nh_dev = dev = FIB_RES_DEV(res);
+       if (!dev) {
+               NL_SET_ERR_MSG(extack,
+                              "No egress device for nexthop gateway");
+               goto out;
+       }
+       dev_hold(dev);
+       if (!netif_carrier_ok(dev))
+               nh->fib_nh_flags |= RTNH_F_LINKDOWN;
+       err = (dev->flags & IFF_UP) ? 0 : -ENETDOWN;
 out:
        rcu_read_unlock();
        return err;
 }
 
+static int fib_check_nh_nongw(struct net *net, struct fib_nh *nh,
+                             struct netlink_ext_ack *extack)
+{
+       struct in_device *in_dev;
+       int err;
+
+       if (nh->fib_nh_flags & (RTNH_F_PERVASIVE | RTNH_F_ONLINK)) {
+               NL_SET_ERR_MSG(extack,
+                              "Invalid flags for nexthop - PERVASIVE and ONLINK can not be set");
+               return -EINVAL;
+       }
+
+       rcu_read_lock();
+
+       err = -ENODEV;
+       in_dev = inetdev_by_index(net, nh->fib_nh_oif);
+       if (!in_dev)
+               goto out;
+       err = -ENETDOWN;
+       if (!(in_dev->dev->flags & IFF_UP)) {
+               NL_SET_ERR_MSG(extack, "Device for nexthop is not up");
+               goto out;
+       }
+
+       nh->fib_nh_dev = in_dev->dev;
+       dev_hold(nh->fib_nh_dev);
+       nh->fib_nh_scope = RT_SCOPE_HOST;
+       if (!netif_carrier_ok(nh->fib_nh_dev))
+               nh->fib_nh_flags |= RTNH_F_LINKDOWN;
+       err = 0;
+out:
+       rcu_read_unlock();
+       return err;
+}
+
+static int fib_check_nh(struct fib_config *cfg, struct fib_nh *nh,
+                       struct netlink_ext_ack *extack)
+{
+       struct net *net = cfg->fc_nlinfo.nl_net;
+       u32 table = cfg->fc_table;
+       int err;
+
+       if (nh->fib_nh_gw_family == AF_INET)
+               err = fib_check_nh_v4_gw(net, nh, table, cfg->fc_scope, extack);
+       else if (nh->fib_nh_gw_family == AF_INET6)
+               err = fib_check_nh_v6_gw(net, nh, table, extack);
+       else
+               err = fib_check_nh_nongw(net, nh, extack);
+
+       return err;
+}
+
 static inline unsigned int fib_laddr_hashfn(__be32 val)
 {
        unsigned int mask = (fib_info_hash_size - 1);
@@ -1075,6 +1199,21 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
        return nh->nh_saddr;
 }
 
+__be32 fib_result_prefsrc(struct net *net, struct fib_result *res)
+{
+       struct fib_nh_common *nhc = res->nhc;
+       struct fib_nh *nh;
+
+       if (res->fi->fib_prefsrc)
+               return res->fi->fib_prefsrc;
+
+       nh = container_of(nhc, struct fib_nh, nh_common);
+       if (nh->nh_saddr_genid == atomic_read(&net->ipv4.dev_addr_genid))
+               return nh->nh_saddr;
+
+       return fib_info_update_nh_saddr(net, nh);
+}
+
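fib_result_prefsrc() receives the family-agnostic fib_nh_common and uses container_of() to get back to the enclosing fib_nh, where the IPv4-only nh_saddr cache lives. A minimal self-contained analogue of the idiom (simplified macro and types, not the kernel definitions):

    #include <stddef.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct fib_nh_common { int nhc_scope; };

    struct fib_nh {
            unsigned int nh_saddr;                  /* IPv4-only state */
            struct fib_nh_common nh_common;         /* shared, embedded */
    };

    /* given a pointer to the embedded member, recover the wrapper */
    static struct fib_nh *nh_from_common(struct fib_nh_common *nhc)
    {
            return container_of(nhc, struct fib_nh, nh_common);
    }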
 static bool fib_valid_prefsrc(struct fib_config *cfg, __be32 fib_prefsrc)
 {
        if (cfg->fc_type != RTN_LOCAL || !cfg->fc_dst ||
@@ -1188,7 +1327,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg,
                goto failure;
 
        if (fib_props[cfg->fc_type].error) {
-               if (cfg->fc_gw || cfg->fc_oif || cfg->fc_mp) {
+               if (cfg->fc_gw_family || cfg->fc_oif || cfg->fc_mp) {
                        NL_SET_ERR_MSG(extack,
                                       "Gateway, device and multipath can not be specified for this route type");
                        goto err_inval;
@@ -1222,7 +1361,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg,
                                       "Route with host scope can not have multiple nexthops");
                        goto err_inval;
                }
-               if (nh->fib_nh_gw4) {
+               if (nh->fib_nh_gw_family) {
                        NL_SET_ERR_MSG(extack,
                                       "Route with host scope can not have a gateway");
                        goto err_inval;
@@ -1253,6 +1392,8 @@ struct fib_info *fib_create_info(struct fib_config *cfg,
 
        change_nexthops(fi) {
                fib_info_update_nh_saddr(net, nexthop_nh);
+               if (nexthop_nh->fib_nh_gw_family == AF_INET6)
+                       fi->fib_nh_is_v6 = true;
        } endfor_nexthops(fi)
 
        fib_rebalance(fi);
@@ -1302,6 +1443,140 @@ struct fib_info *fib_create_info(struct fib_config *cfg,
        return ERR_PTR(err);
 }
 
+int fib_nexthop_info(struct sk_buff *skb, const struct fib_nh_common *nhc,
+                    unsigned int *flags, bool skip_oif)
+{
+       if (nhc->nhc_flags & RTNH_F_DEAD)
+               *flags |= RTNH_F_DEAD;
+
+       if (nhc->nhc_flags & RTNH_F_LINKDOWN) {
+               *flags |= RTNH_F_LINKDOWN;
+
+               rcu_read_lock();
+               switch (nhc->nhc_family) {
+               case AF_INET:
+                       if (ip_ignore_linkdown(nhc->nhc_dev))
+                               *flags |= RTNH_F_DEAD;
+                       break;
+               case AF_INET6:
+                       if (ip6_ignore_linkdown(nhc->nhc_dev))
+                               *flags |= RTNH_F_DEAD;
+                       break;
+               }
+               rcu_read_unlock();
+       }
+
+       switch (nhc->nhc_gw_family) {
+       case AF_INET:
+               if (nla_put_in_addr(skb, RTA_GATEWAY, nhc->nhc_gw.ipv4))
+                       goto nla_put_failure;
+               break;
+       case AF_INET6:
+               /* if gateway family does not match nexthop family
+                * gateway is encoded as RTA_VIA
+                */
+               if (nhc->nhc_gw_family != nhc->nhc_family) {
+                       int alen = sizeof(struct in6_addr);
+                       struct nlattr *nla;
+                       struct rtvia *via;
+
+                       nla = nla_reserve(skb, RTA_VIA, alen + 2);
+                       if (!nla)
+                               goto nla_put_failure;
+
+                       via = nla_data(nla);
+                       via->rtvia_family = AF_INET6;
+                       memcpy(via->rtvia_addr, &nhc->nhc_gw.ipv6, alen);
+               } else if (nla_put_in6_addr(skb, RTA_GATEWAY,
+                                           &nhc->nhc_gw.ipv6) < 0) {
+                       goto nla_put_failure;
+               }
+               break;
+       }
+
+       *flags |= (nhc->nhc_flags & RTNH_F_ONLINK);
+       if (nhc->nhc_flags & RTNH_F_OFFLOAD)
+               *flags |= RTNH_F_OFFLOAD;
+
+       if (!skip_oif && nhc->nhc_dev &&
+           nla_put_u32(skb, RTA_OIF, nhc->nhc_dev->ifindex))
+               goto nla_put_failure;
+
+       if (nhc->nhc_lwtstate &&
+           lwtunnel_fill_encap(skb, nhc->nhc_lwtstate) < 0)
+               goto nla_put_failure;
+
+       return 0;
+
+nla_put_failure:
+       return -EMSGSIZE;
+}
+EXPORT_SYMBOL_GPL(fib_nexthop_info);
+
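The alen + 2 passed to nla_reserve() above is the RTA_VIA payload size: the 2-byte rtvia_family followed by the address bytes, the same offsetof(struct rtvia, rtvia_addr) arithmetic fib_gw_from_via() uses on the receive side:

    #include <stddef.h>
    #include <linux/rtnetlink.h>

    _Static_assert(offsetof(struct rtvia, rtvia_addr) == 2,
                   "RTA_VIA payload = 2-byte family + address bytes");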
+#if IS_ENABLED(CONFIG_IP_ROUTE_MULTIPATH) || IS_ENABLED(CONFIG_IPV6)
+int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nhc,
+                   int nh_weight)
+{
+       const struct net_device *dev = nhc->nhc_dev;
+       struct rtnexthop *rtnh;
+       unsigned int flags = 0;
+
+       rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
+       if (!rtnh)
+               goto nla_put_failure;
+
+       rtnh->rtnh_hops = nh_weight - 1;
+       rtnh->rtnh_ifindex = dev ? dev->ifindex : 0;
+
+       if (fib_nexthop_info(skb, nhc, &flags, true) < 0)
+               goto nla_put_failure;
+
+       rtnh->rtnh_flags = flags;
+
+       /* length of rtnetlink header + attributes */
+       rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh;
+
+       return 0;
+
+nla_put_failure:
+       return -EMSGSIZE;
+}
+EXPORT_SYMBOL_GPL(fib_add_nexthop);
+#endif
+
+#ifdef CONFIG_IP_ROUTE_MULTIPATH
+static int fib_add_multipath(struct sk_buff *skb, struct fib_info *fi)
+{
+       struct nlattr *mp;
+
+       mp = nla_nest_start(skb, RTA_MULTIPATH);
+       if (!mp)
+               goto nla_put_failure;
+
+       for_nexthops(fi) {
+               if (fib_add_nexthop(skb, &nh->nh_common, nh->fib_nh_weight) < 0)
+                       goto nla_put_failure;
+#ifdef CONFIG_IP_ROUTE_CLASSID
+               if (nh->nh_tclassid &&
+                   nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
+                       goto nla_put_failure;
+#endif
+       } endfor_nexthops(fi);
+
+       nla_nest_end(skb, mp);
+
+       return 0;
+
+nla_put_failure:
+       return -EMSGSIZE;
+}
+#else
+static int fib_add_multipath(struct sk_buff *skb, struct fib_info *fi)
+{
+       return 0;
+}
+#endif
+
 int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
                  u32 tb_id, u8 type, __be32 dst, int dst_len, u8 tos,
                  struct fib_info *fi, unsigned int flags)
@@ -1342,72 +1617,23 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
            nla_put_in_addr(skb, RTA_PREFSRC, fi->fib_prefsrc))
                goto nla_put_failure;
        if (fi->fib_nhs == 1) {
-               if (fi->fib_nh->fib_nh_gw4 &&
-                   nla_put_in_addr(skb, RTA_GATEWAY, fi->fib_nh->fib_nh_gw4))
-                       goto nla_put_failure;
-               if (fi->fib_nh->fib_nh_oif &&
-                   nla_put_u32(skb, RTA_OIF, fi->fib_nh->fib_nh_oif))
+               struct fib_nh *nh = &fi->fib_nh[0];
+               unsigned int flags = 0;
+
+               if (fib_nexthop_info(skb, &nh->nh_common, &flags, false) < 0)
                        goto nla_put_failure;
-               if (fi->fib_nh->fib_nh_flags & RTNH_F_LINKDOWN) {
-                       rcu_read_lock();
-                       if (ip_ignore_linkdown(fi->fib_nh->fib_nh_dev))
-                               rtm->rtm_flags |= RTNH_F_DEAD;
-                       rcu_read_unlock();
-               }
-               if (fi->fib_nh->fib_nh_flags & RTNH_F_OFFLOAD)
-                       rtm->rtm_flags |= RTNH_F_OFFLOAD;
+
+               rtm->rtm_flags = flags;
 #ifdef CONFIG_IP_ROUTE_CLASSID
-               if (fi->fib_nh[0].nh_tclassid &&
-                   nla_put_u32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid))
+               if (nh->nh_tclassid &&
+                   nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
                        goto nla_put_failure;
 #endif
-               if (fi->fib_nh->fib_nh_lws &&
-                   lwtunnel_fill_encap(skb, fi->fib_nh->fib_nh_lws) < 0)
+       } else {
+               if (fib_add_multipath(skb, fi) < 0)
                        goto nla_put_failure;
        }
-#ifdef CONFIG_IP_ROUTE_MULTIPATH
-       if (fi->fib_nhs > 1) {
-               struct rtnexthop *rtnh;
-               struct nlattr *mp;
-
-               mp = nla_nest_start(skb, RTA_MULTIPATH);
-               if (!mp)
-                       goto nla_put_failure;
-
-               for_nexthops(fi) {
-                       rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
-                       if (!rtnh)
-                               goto nla_put_failure;
 
-                       rtnh->rtnh_flags = nh->fib_nh_flags & 0xFF;
-                       if (nh->fib_nh_flags & RTNH_F_LINKDOWN) {
-                               rcu_read_lock();
-                               if (ip_ignore_linkdown(nh->fib_nh_dev))
-                                       rtnh->rtnh_flags |= RTNH_F_DEAD;
-                               rcu_read_unlock();
-                       }
-                       rtnh->rtnh_hops = nh->fib_nh_weight - 1;
-                       rtnh->rtnh_ifindex = nh->fib_nh_oif;
-
-                       if (nh->fib_nh_gw4 &&
-                           nla_put_in_addr(skb, RTA_GATEWAY, nh->fib_nh_gw4))
-                               goto nla_put_failure;
-#ifdef CONFIG_IP_ROUTE_CLASSID
-                       if (nh->nh_tclassid &&
-                           nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
-                               goto nla_put_failure;
-#endif
-                       if (nh->fib_nh_lws &&
-                           lwtunnel_fill_encap(skb, nh->fib_nh_lws) < 0)
-                               goto nla_put_failure;
-
-                       /* length of rtnetlink header + attributes */
-                       rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh;
-               } endfor_nexthops(fi);
-
-               nla_nest_end(skb, mp);
-       }
-#endif
        nlmsg_end(skb, nlh);
        return 0;
 
@@ -1745,8 +1971,14 @@ static bool fib_good_nh(const struct fib_nh *nh)
 
                rcu_read_lock_bh();
 
-               n = __ipv4_neigh_lookup_noref(nh->fib_nh_dev,
-                                             (__force u32)nh->fib_nh_gw4);
+               if (likely(nh->fib_nh_gw_family == AF_INET))
+                       n = __ipv4_neigh_lookup_noref(nh->fib_nh_dev,
+                                                  (__force u32)nh->fib_nh_gw4);
+               else if (nh->fib_nh_gw_family == AF_INET6)
+                       n = __ipv6_neigh_lookup_noref_stub(nh->fib_nh_dev,
+                                                          &nh->fib_nh_gw6);
+               else
+                       n = NULL;
                if (n)
                        state = n->nud_state;
 
@@ -1762,20 +1994,22 @@ void fib_select_multipath(struct fib_result *res, int hash)
        struct net *net = fi->fib_net;
        bool first = false;
 
-       for_nexthops(fi) {
+       change_nexthops(fi) {
                if (net->ipv4.sysctl_fib_multipath_use_neigh) {
-                       if (!fib_good_nh(nh))
+                       if (!fib_good_nh(nexthop_nh))
                                continue;
                        if (!first) {
                                res->nh_sel = nhsel;
+                               res->nhc = &nexthop_nh->nh_common;
                                first = true;
                        }
                }
 
-               if (hash > atomic_read(&nh->fib_nh_upper_bound))
+               if (hash > atomic_read(&nexthop_nh->fib_nh_upper_bound))
                        continue;
 
                res->nh_sel = nhsel;
+               res->nhc = &nexthop_nh->nh_common;
                return;
        } endfor_nexthops(fi);
 }
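fib_select_multipath() moves from for_nexthops() (which walks const pointers) to change_nexthops() so a non-const nh_common address can be stored in res->nhc. The invariant is that res->nhc must be refreshed every time res->nh_sel changes; a hypothetical helper (not in this patch) would capture it:

    static inline void fib_result_set_nh(struct fib_result *res,
                                         struct fib_info *fi, int nhsel)
    {
            res->nh_sel = nhsel;
            res->nhc = fib_info_nhc(fi, nhsel);
    }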
@@ -1802,5 +2036,5 @@ void fib_select_path(struct net *net, struct fib_result *res,
 
 check_saddr:
        if (!fl4->saddr)
-               fl4->saddr = FIB_RES_PREFSRC(net, *res);
+               fl4->saddr = fib_result_prefsrc(net, res);
 }
index 1e3b492690f9996d4811f040bae2cd31e92354c5..334f723bdf802d2b169fc99631e7af2be7e90fb5 100644 (file)
@@ -1470,17 +1470,17 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
                if (fi->fib_flags & RTNH_F_DEAD)
                        continue;
                for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
-                       const struct fib_nh *nh = &fi->fib_nh[nhsel];
+                       struct fib_nh_common *nhc = fib_info_nhc(fi, nhsel);
 
-                       if (nh->fib_nh_flags & RTNH_F_DEAD)
+                       if (nhc->nhc_flags & RTNH_F_DEAD)
                                continue;
-                       if (ip_ignore_linkdown(nh->fib_nh_dev) &&
-                           nh->fib_nh_flags & RTNH_F_LINKDOWN &&
+                       if (ip_ignore_linkdown(nhc->nhc_dev) &&
+                           nhc->nhc_flags & RTNH_F_LINKDOWN &&
                            !(fib_flags & FIB_LOOKUP_IGNORE_LINKSTATE))
                                continue;
                        if (!(flp->flowi4_flags & FLOWI_FLAG_SKIP_NH_OIF)) {
                                if (flp->flowi4_oif &&
-                                   flp->flowi4_oif != nh->fib_nh_oif)
+                                   flp->flowi4_oif != nhc->nhc_oif)
                                        continue;
                        }
 
@@ -1490,6 +1490,7 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
                        res->prefix = htonl(n->key);
                        res->prefixlen = KEYLENGTH - fa->fa_slen;
                        res->nh_sel = nhsel;
+                       res->nhc = nhc;
                        res->type = fa->fa_type;
                        res->scope = fi->fib_scope;
                        res->fi = fi;
@@ -1498,7 +1499,7 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
 #ifdef CONFIG_IP_FIB_TRIE_STATS
                        this_cpu_inc(stats->semantic_match_passed);
 #endif
-                       trace_fib_table_lookup(tb->tb_id, flp, nh, err);
+                       trace_fib_table_lookup(tb->tb_id, flp, nhc, err);
 
                        return err;
                }
index 100e63f57ea6c43a42a84ec5fb16812f17c0744e..1ca1586a7e46db11ff06870006d8494a960b94b2 100644 (file)
@@ -121,6 +121,7 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
        struct guehdr *guehdr;
        void *data;
        u16 doffset = 0;
+       u8 proto_ctype;
 
        if (!fou)
                return 1;
@@ -136,7 +137,7 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
                break;
 
        case 1: {
-               /* Direct encasulation of IPv4 or IPv6 */
+               /* Direct encapsulation of IPv4 or IPv6 */
 
                int prot;
 
@@ -170,9 +171,7 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
        /* guehdr may change after pull */
        guehdr = (struct guehdr *)&udp_hdr(skb)[1];
 
-       hdrlen = sizeof(struct guehdr) + optlen;
-
-       if (guehdr->version != 0 || validate_gue_flags(guehdr, optlen))
+       if (validate_gue_flags(guehdr, optlen))
                goto drop;
 
        hdrlen = sizeof(struct guehdr) + optlen;
@@ -212,13 +211,14 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
        if (unlikely(guehdr->control))
                return gue_control_message(skb, guehdr);
 
+       proto_ctype = guehdr->proto_ctype;
        __skb_pull(skb, sizeof(struct udphdr) + hdrlen);
        skb_reset_transport_header(skb);
 
        if (iptunnel_pull_offloads(skb))
                goto drop;
 
-       return -guehdr->proto_ctype;
+       return -proto_ctype;
 
 drop:
        kfree_skb(skb);
@@ -1137,7 +1137,7 @@ static int gue_err(struct sk_buff *skb, u32 info)
        case 0: /* Full GUE header present */
                break;
        case 1: {
-               /* Direct encasulation of IPv4 or IPv6 */
+               /* Direct encapsulation of IPv4 or IPv6 */
                skb_set_transport_header(skb, -(int)sizeof(struct icmphdr));
 
                switch (((struct iphdr *)guehdr)->version) {
index 6ea523d71947779b68d599de11a2a74535f578f0..a175e3e7ae97c53d0d9d8392589a86cdbc4c886e 100644 (file)
@@ -564,7 +564,7 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk,
        rt = ip_route_output_flow(net, fl4, sk);
        if (IS_ERR(rt))
                goto no_route;
-       if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
+       if (opt && opt->opt.is_strictroute && rt->rt_gw_family)
                goto route_err;
        rcu_read_unlock();
        return &rt->dst;
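From here on, the rt_uses_gateway bit is replaced by rt_gw_family: 0 (AF_UNSPEC) still means no gateway, while AF_INET or AF_INET6 says which of rt_gw4/rt_gw6 holds the address, so every old boolean test maps directly onto the new field. As a sketch (rt_has_gateway is a hypothetical name; the patches just open-code the test):

    static inline bool rt_has_gateway(const struct rtable *rt)
    {
            return rt->rt_gw_family != 0;   /* 0 == AF_UNSPEC */
    }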
@@ -602,7 +602,7 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
        rt = ip_route_output_flow(net, fl4, sk);
        if (IS_ERR(rt))
                goto no_route;
-       if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
+       if (opt && opt->opt.is_strictroute && rt->rt_gw_family)
                goto route_err;
        return &rt->dst;
 
index 00ec819f949b5e76ea96be901a697f4e12d5cf4d..06f6f280b9ff38864020e14a8a728b5258e65b0b 100644 (file)
@@ -123,7 +123,7 @@ int ip_forward(struct sk_buff *skb)
 
        rt = skb_rtable(skb);
 
-       if (opt->is_strictroute && rt->rt_uses_gateway)
+       if (opt->is_strictroute && rt->rt_gw_family)
                goto sr_failed;
 
        IPCB(skb)->flags |= IPSKB_FORWARDED;
index fd219f7bd3ea2c9263ac6d21ed3a66fd6442496c..4b052644147630fbfa8075ee623714ff5013bf94 100644 (file)
@@ -259,7 +259,6 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
        struct net *net = dev_net(skb->dev);
        struct metadata_dst *tun_dst = NULL;
        struct erspan_base_hdr *ershdr;
-       struct erspan_metadata *pkt_md;
        struct ip_tunnel_net *itn;
        struct ip_tunnel *tunnel;
        const struct iphdr *iph;
@@ -282,9 +281,6 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
                if (unlikely(!pskb_may_pull(skb, len)))
                        return PACKET_REJECT;
 
-               ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
-               pkt_md = (struct erspan_metadata *)(ershdr + 1);
-
                if (__iptunnel_pull_header(skb,
                                           len,
                                           htons(ETH_P_TEB),
@@ -292,8 +288,9 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
                        goto drop;
 
                if (tunnel->collect_md) {
+                       struct erspan_metadata *pkt_md, *md;
                        struct ip_tunnel_info *info;
-                       struct erspan_metadata *md;
+                       unsigned char *gh;
                        __be64 tun_id;
                        __be16 flags;
 
@@ -306,6 +303,14 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
                        if (!tun_dst)
                                return PACKET_REJECT;
 
+                       /* skb can be uncloned in __iptunnel_pull_header, so
+                        * old pkt_md is no longer valid and we need to reset
+                        * it
+                        */
+                       gh = skb_network_header(skb) +
+                            skb_network_header_len(skb);
+                       pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
+                                                           sizeof(*ershdr));
                        md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
                        md->version = ver;
                        md2 = &md->u.md2;
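This erspan change is a use-after-free fix: __iptunnel_pull_header() can unclone the skb and reallocate its head, so a pkt_md pointer computed before the call may reference freed memory afterwards. The general pattern, sketched with a hypothetical struct some_hdr:

    /* wrong: pointer computed before a call that may reallocate skb->head */
    hdr = (struct some_hdr *)(skb->data + off);
    if (__iptunnel_pull_header(skb, len, proto, false, false))
            goto drop;
    /* ... any use of 'hdr' here may touch freed memory ... */

    /* right: recompute from the current head after the pull */
    if (__iptunnel_pull_header(skb, len, proto, false, false))
            goto drop;
    hdr = (struct some_hdr *)(skb_network_header(skb) + off);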
index ecce2dc78f17eb48d91f8f6638ef0a4e8076fedf..1132d6d1796a4f7c947da76b9b39e7fbe11d3399 100644 (file)
@@ -257,11 +257,10 @@ int ip_local_deliver(struct sk_buff *skb)
                       ip_local_deliver_finish);
 }
 
-static inline bool ip_rcv_options(struct sk_buff *skb)
+static inline bool ip_rcv_options(struct sk_buff *skb, struct net_device *dev)
 {
        struct ip_options *opt;
        const struct iphdr *iph;
-       struct net_device *dev = skb->dev;
 
        /* It looks as overkill, because not all
           IP options require packet mangling.
@@ -297,7 +296,7 @@ static inline bool ip_rcv_options(struct sk_buff *skb)
                        }
                }
 
-               if (ip_options_rcv_srr(skb))
+               if (ip_options_rcv_srr(skb, dev))
                        goto drop;
        }
 
@@ -353,7 +352,7 @@ static int ip_rcv_finish_core(struct net *net, struct sock *sk,
        }
 #endif
 
-       if (iph->ihl > 5 && ip_rcv_options(skb))
+       if (iph->ihl > 5 && ip_rcv_options(skb, dev))
                goto drop;
 
        rt = skb_rtable(skb);
index 32a35043c9f590314b7fa354d5e948b59e665214..3db31bb9df50622f8c9ae961f4eabc566d1cb74a 100644 (file)
@@ -612,7 +612,7 @@ void ip_forward_options(struct sk_buff *skb)
        }
 }
 
-int ip_options_rcv_srr(struct sk_buff *skb)
+int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev)
 {
        struct ip_options *opt = &(IPCB(skb)->opt);
        int srrspace, srrptr;
@@ -647,7 +647,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
 
                orefdst = skb->_skb_refdst;
                skb_dst_set(skb, NULL);
-               err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, skb->dev);
+               err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, dev);
                rt2 = skb_rtable(skb);
                if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) {
                        skb_dst_drop(skb);
index c80188875f39238f8d8ff33603cacf279d3f903a..4e42c1974ba21b837cc7c6f1f09b647552f6b309 100644 (file)
@@ -188,7 +188,7 @@ static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *s
        struct net_device *dev = dst->dev;
        unsigned int hh_len = LL_RESERVED_SPACE(dev);
        struct neighbour *neigh;
-       u32 nexthop;
+       bool is_v6gw = false;
 
        if (rt->rt_type == RTN_MULTICAST) {
                IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTMCAST, skb->len);
@@ -218,16 +218,13 @@ static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *s
        }
 
        rcu_read_lock_bh();
-       nexthop = (__force u32) rt_nexthop(rt, ip_hdr(skb)->daddr);
-       neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
-       if (unlikely(!neigh))
-               neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
+       neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
        if (!IS_ERR(neigh)) {
                int res;
 
                sock_confirm_neigh(skb, neigh);
-               res = neigh_output(neigh, skb);
-
+               /* if crossing protocols, can not use the cached header */
+               res = neigh_output(neigh, skb, is_v6gw);
                rcu_read_unlock_bh();
                return res;
        }
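ip_finish_output2() no longer assumes an IPv4 nexthop. ip_neigh_for_gw() picks the neighbour table from the route's gateway family and reports through is_v6gw when the gateway is IPv6, in which case the cached (IPv4-shaped) hardware header must not be used. A sketch consistent with the call site (the real inline helper is added to include/net/route.h in this series):

    static inline struct neighbour *ip_neigh_for_gw(struct rtable *rt,
                                                    struct sk_buff *skb,
                                                    bool *is_v6gw)
    {
            struct net_device *dev = rt->dst.dev;
            struct neighbour *neigh;

            if (likely(rt->rt_gw_family == AF_INET)) {
                    neigh = ip_neigh_gw4(dev, rt->rt_gw4);
            } else if (rt->rt_gw_family == AF_INET6) {
                    neigh = ip_neigh_gw6(dev, &rt->rt_gw6);
                    *is_v6gw = true;
            } else {
                    neigh = ip_neigh_gw4(dev, ip_hdr(skb)->daddr);
            }
            return neigh;
    }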
@@ -472,7 +469,7 @@ int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
        skb_dst_set_noref(skb, &rt->dst);
 
 packet_routed:
-       if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_uses_gateway)
+       if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_gw_family)
                goto no_route;
 
        /* OK, we know where to send it, allocate and build IP header. */
@@ -693,11 +690,8 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
                        return 0;
                }
 
-               while (frag) {
-                       skb = frag->next;
-                       kfree_skb(frag);
-                       frag = skb;
-               }
+               kfree_skb_list(frag);
+
                IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
                return err;
 
index 2c931120c49402ad0bb3e946873cac66540ef0d7..9a3f13edc98e18fa7d2a7574423624d722c888d0 100644 (file)
@@ -373,7 +373,6 @@ static const struct rhashtable_params ipmr_rht_params = {
        .key_offset = offsetof(struct mfc_cache, cmparg),
        .key_len = sizeof(struct mfc_cache_cmp_arg),
        .nelem_hint = 3,
-       .locks_mul = 1,
        .obj_cmpfn = ipmr_hash_cmp,
        .automatic_shrinking = true,
 };
index c98391d492006d251c3e8c0f025cbbb370bbcc9f..1412b029f37f9444733762ae4d7571d61842e834 100644 (file)
@@ -27,14 +27,6 @@ config NF_TABLES_IPV4
 
 if NF_TABLES_IPV4
 
-config NFT_CHAIN_ROUTE_IPV4
-       tristate "IPv4 nf_tables route chain support"
-       help
-         This option enables the "route" chain for IPv4 in nf_tables. This
-         chain type is used to force packet re-routing after mangling header
-         fields such as the source, destination, type of service and
-         the packet mark.
-
 config NFT_REJECT_IPV4
        select NF_REJECT_IPV4
        default NFT_REJECT
@@ -232,16 +224,10 @@ if IP_NF_NAT
 
 config IP_NF_TARGET_MASQUERADE
        tristate "MASQUERADE target support"
-       select NF_NAT_MASQUERADE
-       default m if NETFILTER_ADVANCED=n
+       select NETFILTER_XT_TARGET_MASQUERADE
        help
-         Masquerading is a special case of NAT: all outgoing connections are
-         changed to seem to come from a particular interface's address, and
-         if the interface goes down, those connections are lost.  This is
-         only useful for dialup accounts with dynamic IP address (ie. your IP
-         address will be different on next dialup).
-
-         To compile it as a module, choose M here.  If unsure, say N.
+         This is a backwards-compat option for the user's convenience
+         (e.g. when running oldconfig). It selects NETFILTER_XT_TARGET_MASQUERADE.
 
 config IP_NF_TARGET_NETMAP
        tristate "NETMAP target support"
index e241f5188ebef0e36237f3144c6b827a8e3eadcb..c50e0ec095d2193bca5403abb4afcac726126dea 100644 (file)
@@ -24,7 +24,6 @@ nf_nat_snmp_basic-y := nf_nat_snmp_basic.asn1.o nf_nat_snmp_basic_main.o
 $(obj)/nf_nat_snmp_basic_main.o: $(obj)/nf_nat_snmp_basic.asn1.h
 obj-$(CONFIG_NF_NAT_SNMP_BASIC) += nf_nat_snmp_basic.o
 
-obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV4) += nft_chain_route_ipv4.o
 obj-$(CONFIG_NFT_REJECT_IPV4) += nft_reject_ipv4.o
 obj-$(CONFIG_NFT_FIB_IPV4) += nft_fib_ipv4.o
 obj-$(CONFIG_NFT_DUP_IPV4) += nft_dup_ipv4.o
@@ -49,7 +48,6 @@ obj-$(CONFIG_IP_NF_MATCH_RPFILTER) += ipt_rpfilter.o
 # targets
 obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
 obj-$(CONFIG_IP_NF_TARGET_ECN) += ipt_ECN.o
-obj-$(CONFIG_IP_NF_TARGET_MASQUERADE) += ipt_MASQUERADE.o
 obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o
 obj-$(CONFIG_IP_NF_TARGET_SYNPROXY) += ipt_SYNPROXY.o
 
diff --git a/net/ipv4/netfilter/nft_chain_route_ipv4.c b/net/ipv4/netfilter/nft_chain_route_ipv4.c
deleted file mode 100644 (file)
index 7d82934..0000000
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
- * Copyright (c) 2012 Pablo Neira Ayuso <pablo@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/skbuff.h>
-#include <linux/netlink.h>
-#include <linux/netfilter.h>
-#include <linux/netfilter_ipv4.h>
-#include <linux/netfilter/nfnetlink.h>
-#include <linux/netfilter/nf_tables.h>
-#include <net/netfilter/nf_tables.h>
-#include <net/netfilter/nf_tables_ipv4.h>
-#include <net/route.h>
-#include <net/ip.h>
-
-static unsigned int nf_route_table_hook(void *priv,
-                                       struct sk_buff *skb,
-                                       const struct nf_hook_state *state)
-{
-       unsigned int ret;
-       struct nft_pktinfo pkt;
-       u32 mark;
-       __be32 saddr, daddr;
-       u_int8_t tos;
-       const struct iphdr *iph;
-       int err;
-
-       nft_set_pktinfo(&pkt, skb, state);
-       nft_set_pktinfo_ipv4(&pkt, skb);
-
-       mark = skb->mark;
-       iph = ip_hdr(skb);
-       saddr = iph->saddr;
-       daddr = iph->daddr;
-       tos = iph->tos;
-
-       ret = nft_do_chain(&pkt, priv);
-       if (ret != NF_DROP && ret != NF_STOLEN) {
-               iph = ip_hdr(skb);
-
-               if (iph->saddr != saddr ||
-                   iph->daddr != daddr ||
-                   skb->mark != mark ||
-                   iph->tos != tos) {
-                       err = ip_route_me_harder(state->net, skb, RTN_UNSPEC);
-                       if (err < 0)
-                               ret = NF_DROP_ERR(err);
-               }
-       }
-       return ret;
-}
-
-static const struct nft_chain_type nft_chain_route_ipv4 = {
-       .name           = "route",
-       .type           = NFT_CHAIN_T_ROUTE,
-       .family         = NFPROTO_IPV4,
-       .owner          = THIS_MODULE,
-       .hook_mask      = (1 << NF_INET_LOCAL_OUT),
-       .hooks          = {
-               [NF_INET_LOCAL_OUT]     = nf_route_table_hook,
-       },
-};
-
-static int __init nft_chain_route_init(void)
-{
-       nft_register_chain_type(&nft_chain_route_ipv4);
-
-       return 0;
-}
-
-static void __exit nft_chain_route_exit(void)
-{
-       nft_unregister_chain_type(&nft_chain_route_ipv4);
-}
-
-module_init(nft_chain_route_init);
-module_exit(nft_chain_route_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
-MODULE_ALIAS_NFT_CHAIN(AF_INET, "route");
index 7977514d90f59e873b7403cd552d1bda9567f067..d9b5aa2290d63daf2290a7fbbae5093a69ad6330 100644 (file)
@@ -434,37 +434,46 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
                                           struct sk_buff *skb,
                                           const void *daddr)
 {
+       const struct rtable *rt = container_of(dst, struct rtable, dst);
        struct net_device *dev = dst->dev;
-       const __be32 *pkey = daddr;
-       const struct rtable *rt;
        struct neighbour *n;
 
-       rt = (const struct rtable *) dst;
-       if (rt->rt_gateway)
-               pkey = (const __be32 *) &rt->rt_gateway;
-       else if (skb)
-               pkey = &ip_hdr(skb)->daddr;
+       rcu_read_lock_bh();
+
+       if (likely(rt->rt_gw_family == AF_INET)) {
+               n = ip_neigh_gw4(dev, rt->rt_gw4);
+       } else if (rt->rt_gw_family == AF_INET6) {
+               n = ip_neigh_gw6(dev, &rt->rt_gw6);
+       } else {
+               __be32 pkey;
+
+               pkey = skb ? ip_hdr(skb)->daddr : *((__be32 *) daddr);
+               n = ip_neigh_gw4(dev, pkey);
+       }
 
-       n = __ipv4_neigh_lookup(dev, *(__force u32 *)pkey);
-       if (n)
-               return n;
-       return neigh_create(&arp_tbl, pkey, dev);
+       if (n && !refcount_inc_not_zero(&n->refcnt))
+               n = NULL;
+
+       rcu_read_unlock_bh();
+
+       return n;
 }
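Because the lookup now happens under rcu_read_lock_bh() with the noref helpers, the function must convert the borrowed pointer into a real reference before returning; refcount_inc_not_zero() fails if the entry is already being torn down, and NULL is returned instead of a dying neighbour. The pattern in isolation (lookup_noref is a placeholder name):

    rcu_read_lock_bh();
    n = lookup_noref(dev, key);         /* borrowed; valid only under RCU */
    if (n && !refcount_inc_not_zero(&n->refcnt))
            n = NULL;                   /* lost the race with teardown */
    rcu_read_unlock_bh();
    return n;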
 
 static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr)
 {
+       const struct rtable *rt = container_of(dst, struct rtable, dst);
        struct net_device *dev = dst->dev;
        const __be32 *pkey = daddr;
-       const struct rtable *rt;
 
-       rt = (const struct rtable *)dst;
-       if (rt->rt_gateway)
-               pkey = (const __be32 *)&rt->rt_gateway;
-       else if (!daddr ||
+       if (rt->rt_gw_family == AF_INET) {
+               pkey = (const __be32 *)&rt->rt_gw4;
+       } else if (rt->rt_gw_family == AF_INET6) {
+               return __ipv6_confirm_neigh_stub(dev, &rt->rt_gw6);
+       } else if (!daddr ||
                 (rt->rt_flags &
-                 (RTCF_MULTICAST | RTCF_BROADCAST | RTCF_LOCAL)))
+                 (RTCF_MULTICAST | RTCF_BROADCAST | RTCF_LOCAL))) {
                return;
-
+       }
        __ipv4_confirm_neigh(dev, *(__force u32 *)pkey);
 }
 
@@ -629,8 +638,8 @@ static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnh
 
        if (fnhe->fnhe_gw) {
                rt->rt_flags |= RTCF_REDIRECTED;
-               rt->rt_gateway = fnhe->fnhe_gw;
-               rt->rt_uses_gateway = 1;
+               rt->rt_gw_family = AF_INET;
+               rt->rt_gw4 = fnhe->fnhe_gw;
        }
 }
 
@@ -747,7 +756,7 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
                return;
        }
 
-       if (rt->rt_gateway != old_gw)
+       if (rt->rt_gw_family != AF_INET || rt->rt_gw4 != old_gw)
                return;
 
        in_dev = __in_dev_get_rcu(dev);
@@ -778,8 +787,10 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
                        neigh_event_send(n, NULL);
                } else {
                        if (fib_lookup(net, fl4, &res, 0) == 0) {
-                               struct fib_nh *nh = &FIB_RES_NH(res);
+                               struct fib_nh_common *nhc = FIB_RES_NHC(res);
+                               struct fib_nh *nh;
 
+                               nh = container_of(nhc, struct fib_nh, nh_common);
                                update_or_create_fnhe(nh, fl4->daddr, new_gw,
                                                0, false,
                                                jiffies + ip_rt_gc_timeout);
@@ -1027,8 +1038,10 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
 
        rcu_read_lock();
        if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) {
-               struct fib_nh *nh = &FIB_RES_NH(res);
+               struct fib_nh_common *nhc = FIB_RES_NHC(res);
+               struct fib_nh *nh;
 
+               nh = container_of(nhc, struct fib_nh, nh_common);
                update_or_create_fnhe(nh, fl4->daddr, 0, mtu, lock,
                                      jiffies + ip_rt_mtu_expires);
        }
@@ -1187,9 +1200,23 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
 
 static void ipv4_link_failure(struct sk_buff *skb)
 {
+       struct ip_options opt;
        struct rtable *rt;
+       int res;
+
+       /* Recompile ip options since IPCB may not be valid anymore.
+        */
+       memset(&opt, 0, sizeof(opt));
+       opt.optlen = ip_hdr(skb)->ihl*4 - sizeof(struct iphdr);
 
-       icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
+       rcu_read_lock();
+       res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
+       rcu_read_unlock();
+
+       if (res)
+               return;
+
+       __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt);
 
        rt = skb_rtable(skb);
        if (rt)
@@ -1235,7 +1262,7 @@ void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
 
                rcu_read_lock();
                if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res, 0) == 0)
-                       src = FIB_RES_PREFSRC(dev_net(rt->dst.dev), res);
+                       src = fib_result_prefsrc(dev_net(rt->dst.dev), &res);
                else
                        src = inet_select_addr(rt->dst.dev,
                                               rt_nexthop(rt, iph->daddr),
@@ -1278,7 +1305,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
        mtu = READ_ONCE(dst->dev->mtu);
 
        if (unlikely(ip_mtu_locked(dst))) {
-               if (rt->rt_uses_gateway && mtu > 576)
+               if (rt->rt_gw_family && mtu > 576)
                        mtu = 576;
        }
 
@@ -1354,9 +1381,9 @@ static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)
 
 u32 ip_mtu_from_fib_result(struct fib_result *res, __be32 daddr)
 {
+       struct fib_nh_common *nhc = res->nhc;
+       struct net_device *dev = nhc->nhc_dev;
        struct fib_info *fi = res->fi;
-       struct fib_nh *nh = &fi->fib_nh[res->nh_sel];
-       struct net_device *dev = nh->fib_nh_dev;
        u32 mtu = 0;
 
        if (dev_net(dev)->ipv4.sysctl_ip_fwd_use_pmtu ||
@@ -1364,6 +1391,7 @@ u32 ip_mtu_from_fib_result(struct fib_result *res, __be32 daddr)
                mtu = fi->fib_mtu;
 
        if (likely(!mtu)) {
+               struct fib_nh *nh = container_of(nhc, struct fib_nh, nh_common);
                struct fib_nh_exception *fnhe;
 
                fnhe = find_exception(nh, daddr);
@@ -1374,7 +1402,7 @@ u32 ip_mtu_from_fib_result(struct fib_result *res, __be32 daddr)
        if (likely(!mtu))
                mtu = min(READ_ONCE(dev->mtu), IP_MAX_MTU);
 
-       return mtu - lwtunnel_headroom(nh->fib_nh_lws, mtu);
+       return mtu - lwtunnel_headroom(nhc->nhc_lwtstate, mtu);
 }
 
 static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
@@ -1405,8 +1433,10 @@ static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
                        orig = NULL;
                }
                fill_route_from_fnhe(rt, fnhe);
-               if (!rt->rt_gateway)
-                       rt->rt_gateway = daddr;
+               if (!rt->rt_gw4) {
+                       rt->rt_gw4 = daddr;
+                       rt->rt_gw_family = AF_INET;
+               }
 
                if (do_cache) {
                        dst_hold(&rt->dst);
@@ -1529,14 +1559,21 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
        bool cached = false;
 
        if (fi) {
-               struct fib_nh *nh = &FIB_RES_NH(*res);
-
-               if (nh->fib_nh_gw4 && nh->fib_nh_scope == RT_SCOPE_LINK) {
-                       rt->rt_gateway = nh->fib_nh_gw4;
-                       rt->rt_uses_gateway = 1;
+               struct fib_nh_common *nhc = FIB_RES_NHC(*res);
+               struct fib_nh *nh;
+
+               if (nhc->nhc_gw_family && nhc->nhc_scope == RT_SCOPE_LINK) {
+                       rt->rt_gw_family = nhc->nhc_gw_family;
+                       /* only INET and INET6 are supported */
+                       if (likely(nhc->nhc_gw_family == AF_INET))
+                               rt->rt_gw4 = nhc->nhc_gw.ipv4;
+                       else
+                               rt->rt_gw6 = nhc->nhc_gw.ipv6;
                }
+
                ip_dst_init_metrics(&rt->dst, fi->fib_metrics);
 
+               nh = container_of(nhc, struct fib_nh, nh_common);
 #ifdef CONFIG_IP_ROUTE_CLASSID
                rt->dst.tclassid = nh->nh_tclassid;
 #endif
@@ -1551,8 +1588,10 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
                         * However, if we are unsuccessful at storing this
                         * route into the cache we really need to set it.
                         */
-                       if (!rt->rt_gateway)
-                               rt->rt_gateway = daddr;
+                       if (!rt->rt_gw4) {
+                               rt->rt_gw_family = AF_INET;
+                               rt->rt_gw4 = daddr;
+                       }
                        rt_add_uncached_list(rt);
                }
        } else
@@ -1585,8 +1624,8 @@ struct rtable *rt_dst_alloc(struct net_device *dev,
                rt->rt_iif = 0;
                rt->rt_pmtu = 0;
                rt->rt_mtu_locked = 0;
-               rt->rt_gateway = 0;
-               rt->rt_uses_gateway = 0;
+               rt->rt_gw_family = 0;
+               rt->rt_gw4 = 0;
                INIT_LIST_HEAD(&rt->rt_uncached);
 
                rt->dst.output = ip_output;
@@ -1699,15 +1738,18 @@ static int __mkroute_input(struct sk_buff *skb,
                           struct in_device *in_dev,
                           __be32 daddr, __be32 saddr, u32 tos)
 {
+       struct fib_nh_common *nhc = FIB_RES_NHC(*res);
+       struct net_device *dev = nhc->nhc_dev;
        struct fib_nh_exception *fnhe;
        struct rtable *rth;
+       struct fib_nh *nh;
        int err;
        struct in_device *out_dev;
        bool do_cache;
        u32 itag = 0;
 
        /* get a working reference to the output device */
-       out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
+       out_dev = __in_dev_get_rcu(dev);
        if (!out_dev) {
                net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
                return -EINVAL;
@@ -1724,10 +1766,14 @@ static int __mkroute_input(struct sk_buff *skb,
 
        do_cache = res->fi && !itag;
        if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
-           skb->protocol == htons(ETH_P_IP) &&
-           (IN_DEV_SHARED_MEDIA(out_dev) ||
-            inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
-               IPCB(skb)->flags |= IPSKB_DOREDIRECT;
+           skb->protocol == htons(ETH_P_IP)) {
+               __be32 gw;
+
+               gw = nhc->nhc_gw_family == AF_INET ? nhc->nhc_gw.ipv4 : 0;
+               if (IN_DEV_SHARED_MEDIA(out_dev) ||
+                   inet_addr_onlink(out_dev, saddr, gw))
+                       IPCB(skb)->flags |= IPSKB_DOREDIRECT;
+       }
 
        if (skb->protocol != htons(ETH_P_IP)) {
                /* Not IP (i.e. ARP). Do not create route, if it is
@@ -1744,12 +1790,13 @@ static int __mkroute_input(struct sk_buff *skb,
                }
        }
 
-       fnhe = find_exception(&FIB_RES_NH(*res), daddr);
+       nh = container_of(nhc, struct fib_nh, nh_common);
+       fnhe = find_exception(nh, daddr);
        if (do_cache) {
                if (fnhe)
                        rth = rcu_dereference(fnhe->fnhe_rth_input);
                else
-                       rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
+                       rth = rcu_dereference(nh->nh_rth_input);
                if (rt_cache_valid(rth)) {
                        skb_dst_set_noref(skb, &rth->dst);
                        goto out;
@@ -2043,7 +2090,11 @@ out:     return err;
        do_cache = false;
        if (res->fi) {
                if (!itag) {
-                       rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
+                       struct fib_nh_common *nhc = FIB_RES_NHC(*res);
+                       struct fib_nh *nh;
+
+                       nh = container_of(nhc, struct fib_nh, nh_common);
+                       rth = rcu_dereference(nh->nh_rth_input);
                        if (rt_cache_valid(rth)) {
                                skb_dst_set_noref(skb, &rth->dst);
                                err = 0;
@@ -2073,15 +2124,17 @@ out:    return err;
        }
 
        if (do_cache) {
-               struct fib_nh *nh = &FIB_RES_NH(*res);
+               struct fib_nh_common *nhc = FIB_RES_NHC(*res);
+               struct fib_nh *nh;
 
-               rth->dst.lwtstate = lwtstate_get(nh->fib_nh_lws);
+               rth->dst.lwtstate = lwtstate_get(nhc->nhc_lwtstate);
                if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
                        WARN_ON(rth->dst.input == lwtunnel_input);
                        rth->dst.lwtstate->orig_input = rth->dst.input;
                        rth->dst.input = lwtunnel_input;
                }
 
+               nh = container_of(nhc, struct fib_nh, nh_common);
                if (unlikely(!rt_cache_route(nh, rth)))
                        rt_add_uncached_list(rth);
        }
@@ -2253,8 +2306,9 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
        fnhe = NULL;
        do_cache &= fi != NULL;
        if (fi) {
+               struct fib_nh_common *nhc = FIB_RES_NHC(*res);
+               struct fib_nh *nh = container_of(nhc, struct fib_nh, nh_common);
                struct rtable __rcu **prth;
-               struct fib_nh *nh = &FIB_RES_NH(*res);
 
                fnhe = find_exception(nh, fl4->daddr);
                if (!do_cache)
@@ -2264,8 +2318,8 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
                } else {
                        if (unlikely(fl4->flowi4_flags &
                                     FLOWI_FLAG_KNOWN_NH &&
-                                    !(nh->fib_nh_gw4 &&
-                                      nh->fib_nh_scope == RT_SCOPE_LINK))) {
+                                    !(nhc->nhc_gw_family &&
+                                      nhc->nhc_scope == RT_SCOPE_LINK))) {
                                do_cache = false;
                                goto add;
                        }
@@ -2574,8 +2628,11 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
                rt->rt_genid = rt_genid_ipv4(net);
                rt->rt_flags = ort->rt_flags;
                rt->rt_type = ort->rt_type;
-               rt->rt_gateway = ort->rt_gateway;
-               rt->rt_uses_gateway = ort->rt_uses_gateway;
+               rt->rt_gw_family = ort->rt_gw_family;
+               if (rt->rt_gw_family == AF_INET)
+                       rt->rt_gw4 = ort->rt_gw4;
+               else if (rt->rt_gw_family == AF_INET6)
+                       rt->rt_gw6 = ort->rt_gw6;
 
                INIT_LIST_HEAD(&rt->rt_uncached);
        }
@@ -2654,9 +2711,22 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
                if (nla_put_in_addr(skb, RTA_PREFSRC, fl4->saddr))
                        goto nla_put_failure;
        }
-       if (rt->rt_uses_gateway &&
-           nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gateway))
+       if (rt->rt_gw_family == AF_INET &&
+           nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gw4)) {
                goto nla_put_failure;
+       } else if (rt->rt_gw_family == AF_INET6) {
+               int alen = sizeof(struct in6_addr);
+               struct nlattr *nla;
+               struct rtvia *via;
+
+               nla = nla_reserve(skb, RTA_VIA, alen + 2);
+               if (!nla)
+                       goto nla_put_failure;
+
+               via = nla_data(nla);
+               via->rtvia_family = AF_INET6;
+               memcpy(via->rtvia_addr, &rt->rt_gw6, alen);
+       }
 
        expires = rt->dst.expires;
        if (expires) {
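
For an IPv6 gateway on an IPv4 route, rt_fill_info() above can no longer use RTA_GATEWAY (a bare IPv4 address) and instead emits RTA_VIA, whose payload is a struct rtvia: a 2-byte address family followed by the raw address bytes. A userspace sketch of that layout, using the real struct rtvia from linux/rtnetlink.h but with simplified buffer handling rather than the kernel's nla_* helpers:

    /* Layout sketch for the RTA_VIA payload built above. */
    #include <stdio.h>
    #include <string.h>
    #include <arpa/inet.h>
    #include <linux/rtnetlink.h>

    int main(void)
    {
            unsigned char payload[sizeof(struct rtvia) + sizeof(struct in6_addr)];
            struct rtvia *via = (struct rtvia *)payload;
            struct in6_addr gw6;

            inet_pton(AF_INET6, "2001:db8::1", &gw6);
            via->rtvia_family = AF_INET6;
            memcpy(via->rtvia_addr, &gw6, sizeof(gw6));

            /* matches the "alen + 2" reserved for the attribute above */
            printf("RTA_VIA payload: %zu bytes\n", sizeof(payload));
            return 0;
    }
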
index cd4814f7e96223447195f0d0ac224c54d5501d2e..477cb4aa456c11c70185a982cbadafba857d3619 100644 (file)
@@ -49,9 +49,8 @@
 #define DCTCP_MAX_ALPHA        1024U
 
 struct dctcp {
-       u32 acked_bytes_ecn;
-       u32 acked_bytes_total;
-       u32 prior_snd_una;
+       u32 old_delivered;
+       u32 old_delivered_ce;
        u32 prior_rcv_nxt;
        u32 dctcp_alpha;
        u32 next_seq;
@@ -67,19 +66,14 @@ static unsigned int dctcp_alpha_on_init __read_mostly = DCTCP_MAX_ALPHA;
 module_param(dctcp_alpha_on_init, uint, 0644);
 MODULE_PARM_DESC(dctcp_alpha_on_init, "parameter for initial alpha value");
 
-static unsigned int dctcp_clamp_alpha_on_loss __read_mostly;
-module_param(dctcp_clamp_alpha_on_loss, uint, 0644);
-MODULE_PARM_DESC(dctcp_clamp_alpha_on_loss,
-                "parameter for clamping alpha on loss");
-
 static struct tcp_congestion_ops dctcp_reno;
 
 static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca)
 {
        ca->next_seq = tp->snd_nxt;
 
-       ca->acked_bytes_ecn = 0;
-       ca->acked_bytes_total = 0;
+       ca->old_delivered = tp->delivered;
+       ca->old_delivered_ce = tp->delivered_ce;
 }
 
 static void dctcp_init(struct sock *sk)
@@ -91,7 +85,6 @@ static void dctcp_init(struct sock *sk)
             sk->sk_state == TCP_CLOSE)) {
                struct dctcp *ca = inet_csk_ca(sk);
 
-               ca->prior_snd_una = tp->snd_una;
                ca->prior_rcv_nxt = tp->rcv_nxt;
 
                ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA);
@@ -123,37 +116,25 @@ static void dctcp_update_alpha(struct sock *sk, u32 flags)
 {
        const struct tcp_sock *tp = tcp_sk(sk);
        struct dctcp *ca = inet_csk_ca(sk);
-       u32 acked_bytes = tp->snd_una - ca->prior_snd_una;
-
-       /* If ack did not advance snd_una, count dupack as MSS size.
-        * If ack did update window, do not count it at all.
-        */
-       if (acked_bytes == 0 && !(flags & CA_ACK_WIN_UPDATE))
-               acked_bytes = inet_csk(sk)->icsk_ack.rcv_mss;
-       if (acked_bytes) {
-               ca->acked_bytes_total += acked_bytes;
-               ca->prior_snd_una = tp->snd_una;
-
-               if (flags & CA_ACK_ECE)
-                       ca->acked_bytes_ecn += acked_bytes;
-       }
 
        /* Expired RTT */
        if (!before(tp->snd_una, ca->next_seq)) {
-               u64 bytes_ecn = ca->acked_bytes_ecn;
+               u32 delivered_ce = tp->delivered_ce - ca->old_delivered_ce;
                u32 alpha = ca->dctcp_alpha;
 
                /* alpha = (1 - g) * alpha + g * F */
 
                alpha -= min_not_zero(alpha, alpha >> dctcp_shift_g);
-               if (bytes_ecn) {
+               if (delivered_ce) {
+                       u32 delivered = tp->delivered - ca->old_delivered;
+
                        /* If dctcp_shift_g == 1, a 32bit value would overflow
-                        * after 8 Mbytes.
+                        * after 8 M packets.
                         */
-                       bytes_ecn <<= (10 - dctcp_shift_g);
-                       do_div(bytes_ecn, max(1U, ca->acked_bytes_total));
+                       delivered_ce <<= (10 - dctcp_shift_g);
+                       delivered_ce /= max(1U, delivered);
 
-                       alpha = min(alpha + (u32)bytes_ecn, DCTCP_MAX_ALPHA);
+                       alpha = min(alpha + delivered_ce, DCTCP_MAX_ALPHA);
                }
                /* dctcp_alpha can be read from dctcp_get_info() without
                 * synchro, so we ask compiler to not use dctcp_alpha
@@ -164,21 +145,23 @@ static void dctcp_update_alpha(struct sock *sk, u32 flags)
        }
 }
 
-static void dctcp_state(struct sock *sk, u8 new_state)
+static void dctcp_react_to_loss(struct sock *sk)
 {
-       if (dctcp_clamp_alpha_on_loss && new_state == TCP_CA_Loss) {
-               struct dctcp *ca = inet_csk_ca(sk);
+       struct dctcp *ca = inet_csk_ca(sk);
+       struct tcp_sock *tp = tcp_sk(sk);
 
-               /* If this extension is enabled, we clamp dctcp_alpha to
-                * max on packet loss; the motivation is that dctcp_alpha
-                * is an indicator to the extend of congestion and packet
-                * loss is an indicator of extreme congestion; setting
-                * this in practice turned out to be beneficial, and
-                * effectively assumes total congestion which reduces the
-                * window by half.
-                */
-               ca->dctcp_alpha = DCTCP_MAX_ALPHA;
-       }
+       ca->loss_cwnd = tp->snd_cwnd;
+       tp->snd_ssthresh = max(tp->snd_cwnd >> 1U, 2U);
+}
+
+static void dctcp_state(struct sock *sk, u8 new_state)
+{
+       if (new_state == TCP_CA_Recovery &&
+           new_state != inet_csk(sk)->icsk_ca_state)
+               dctcp_react_to_loss(sk);
+       /* We handle RTO in dctcp_cwnd_event to ensure that we perform only
+        * one loss-adjustment per RTT.
+        */
 }
 
 static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
@@ -190,6 +173,9 @@ static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
        case CA_EVENT_ECN_NO_CE:
                dctcp_ece_ack_update(sk, ev, &ca->prior_rcv_nxt, &ca->ce_state);
                break;
+       case CA_EVENT_LOSS:
+               dctcp_react_to_loss(sk);
+               break;
        default:
                /* Don't care for the rest. */
                break;
@@ -200,6 +186,7 @@ static size_t dctcp_get_info(struct sock *sk, u32 ext, int *attr,
                             union tcp_cc_info *info)
 {
        const struct dctcp *ca = inet_csk_ca(sk);
+       const struct tcp_sock *tp = tcp_sk(sk);
 
        /* Fill it also in case of VEGASINFO due to req struct limits.
         * We can still correctly retrieve it later.
@@ -211,8 +198,10 @@ static size_t dctcp_get_info(struct sock *sk, u32 ext, int *attr,
                        info->dctcp.dctcp_enabled = 1;
                        info->dctcp.dctcp_ce_state = (u16) ca->ce_state;
                        info->dctcp.dctcp_alpha = ca->dctcp_alpha;
-                       info->dctcp.dctcp_ab_ecn = ca->acked_bytes_ecn;
-                       info->dctcp.dctcp_ab_tot = ca->acked_bytes_total;
+                       info->dctcp.dctcp_ab_ecn = tp->mss_cache *
+                                                  (tp->delivered_ce - ca->old_delivered_ce);
+                       info->dctcp.dctcp_ab_tot = tp->mss_cache *
+                                                  (tp->delivered - ca->old_delivered);
                }
 
                *attr = INET_DIAG_DCTCPINFO;
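
The DCTCP rework above replaces the per-byte counters with the socket's delivered/delivered_ce packet counters, but the fixed-point update is unchanged: alpha = (1 - g) * alpha + g * F, with g = 2^-dctcp_shift_g and F scaled to DCTCP_MAX_ALPHA (1024). A standalone model of that arithmetic (shift_g = 4 here is only an example value):

    #include <stdio.h>

    #define DCTCP_MAX_ALPHA 1024U

    static unsigned int dctcp_update(unsigned int alpha, unsigned int shift_g,
                                     unsigned int delivered_ce,
                                     unsigned int delivered)
    {
            /* (1 - g) * alpha, mirroring the min_not_zero() step above */
            unsigned int dec = alpha >> shift_g;

            if (alpha && !dec)
                    dec = alpha;
            alpha -= dec;

            if (delivered_ce) {
                    /* g * F in 10-bit fixed point: (ce << (10 - g)) / delivered */
                    delivered_ce <<= (10 - shift_g);
                    delivered_ce /= (delivered ? delivered : 1);
                    alpha += delivered_ce;
                    if (alpha > DCTCP_MAX_ALPHA)
                            alpha = DCTCP_MAX_ALPHA;
            }
            return alpha;
    }

    int main(void)
    {
            /* 25% of packets CE-marked in the last RTT, g = 1/16 */
            printf("alpha: %u\n", dctcp_update(512, 4, 25, 100)); /* 480 + 16 = 496 */
            return 0;
    }
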
index 5dfbc333e79ae98993515b8f493de1b14af3d344..97671bff597adb8cd2aab268f344ff3ae41a4ebb 100644 (file)
@@ -402,11 +402,12 @@ static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb)
 static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
 {
        struct tcp_sock *tp = tcp_sk(sk);
+       int room;
+
+       room = min_t(int, tp->window_clamp, tcp_space(sk)) - tp->rcv_ssthresh;
 
        /* Check #1 */
-       if (tp->rcv_ssthresh < tp->window_clamp &&
-           (int)tp->rcv_ssthresh < tcp_space(sk) &&
-           !tcp_under_memory_pressure(sk)) {
+       if (room > 0 && !tcp_under_memory_pressure(sk)) {
                int incr;
 
                /* Check #2. Increase window, if skb with such overhead
@@ -419,8 +420,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
 
                if (incr) {
                        incr = max_t(int, incr, 2 * skb->len);
-                       tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr,
-                                              tp->window_clamp);
+                       tp->rcv_ssthresh += min(room, incr);
                        inet_csk(sk)->icsk_ack.quick |= 1;
                }
        }
@@ -6263,6 +6263,11 @@ static inline void pr_drop_req(struct request_sock *req, __u16 port, int family)
  * congestion control: Linux DCTCP asserts ECT on all packets,
  * including SYN, which is most optimal solution; however,
  * others, such as FreeBSD do not.
+ *
+ * Exception: At least one of the reserved bits of the TCP header (th->res1) is
+ * set, indicating the use of a future TCP extension (such as AccECN). See
+ * RFC8311 §4.3 which updates RFC3168 to allow the development of such
+ * extensions.
  */
 static void tcp_ecn_create_request(struct request_sock *req,
                                   const struct sk_buff *skb,
@@ -6282,7 +6287,7 @@ static void tcp_ecn_create_request(struct request_sock *req,
        ecn_ok_dst = dst_feature(dst, DST_FEATURE_ECN_MASK);
        ecn_ok = net->ipv4.sysctl_tcp_ecn || ecn_ok_dst;
 
-       if ((!ect && ecn_ok) || tcp_ca_needs_ecn(listen_sk) ||
+       if (((!ect || th->res1) && ecn_ok) || tcp_ca_needs_ecn(listen_sk) ||
            (ecn_ok_dst & DST_FEATURE_ECN_CA) ||
            tcp_bpf_ca_needs_ecn((struct sock *)req))
                inet_rsk(req)->ecn_ok = 1;
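
The tcp_grow_window() change above folds three separate checks into one signed "room" value: rcv_ssthresh may grow by at most the distance to the tighter of window_clamp and the available receive space. A small standalone model of that clamp:

    #include <stdio.h>

    static int grow(int rcv_ssthresh, int window_clamp, int space, int incr)
    {
            int room = (window_clamp < space ? window_clamp : space) - rcv_ssthresh;

            if (room <= 0)
                    return rcv_ssthresh;    /* no headroom, no growth */
            return rcv_ssthresh + (incr < room ? incr : room);
    }

    int main(void)
    {
            /* headroom is 1000 bytes, so a 4000-byte increment is capped */
            printf("%d\n", grow(64000, 65000, 70000, 4000));  /* 65000 */
            return 0;
    }
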
index 3979939804b70b805655d94c598a6cb397e35947..faa6fa619f59cb2171a450cff9d6b7e60fc8aa5c 100644 (file)
@@ -2585,7 +2585,8 @@ static void __net_exit tcp_sk_exit(struct net *net)
 {
        int cpu;
 
-       module_put(net->ipv4.tcp_congestion_control->owner);
+       if (net->ipv4.tcp_congestion_control)
+               module_put(net->ipv4.tcp_congestion_control->owner);
 
        for_each_possible_cpu(cpu)
                inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
index e265d1aeeb66996da3903eef383eaec9a320cce0..32061928b054275c546dae1141f45f0717018c78 100644 (file)
@@ -3088,7 +3088,6 @@ void tcp_send_fin(struct sock *sk)
                tskb = skb_rb_last(&sk->tcp_rtx_queue);
 
        if (tskb) {
-coalesce:
                TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
                TCP_SKB_CB(tskb)->end_seq++;
                tp->write_seq++;
@@ -3104,11 +3103,9 @@ void tcp_send_fin(struct sock *sk)
                }
        } else {
                skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation);
-               if (unlikely(!skb)) {
-                       if (tskb)
-                               goto coalesce;
+               if (unlikely(!skb))
                        return;
-               }
+
                INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
                skb_reserve(skb, MAX_TCP_HEADER);
                sk_forced_mem_schedule(sk, skb->truesize);
index 372fdc5381a98e0d8a673ef1649323f91764ad8e..3c58ba02af7dec49f454d6998a3436b46c248084 100644 (file)
@@ -1631,7 +1631,7 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 EXPORT_SYMBOL(udp_ioctl);
 
 struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
-                              int noblock, int *peeked, int *off, int *err)
+                              int noblock, int *off, int *err)
 {
        struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
        struct sk_buff_head *queue;
@@ -1650,13 +1650,11 @@ struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
                        break;
 
                error = -EAGAIN;
-               *peeked = 0;
                do {
                        spin_lock_bh(&queue->lock);
                        skb = __skb_try_recv_from_queue(sk, queue, flags,
                                                        udp_skb_destructor,
-                                                       peeked, off, err,
-                                                       &last);
+                                                       off, err, &last);
                        if (skb) {
                                spin_unlock_bh(&queue->lock);
                                return skb;
@@ -1677,8 +1675,7 @@ struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
 
                        skb = __skb_try_recv_from_queue(sk, queue, flags,
                                                        udp_skb_dtor_locked,
-                                                       peeked, off, err,
-                                                       &last);
+                                                       off, err, &last);
                        spin_unlock(&sk_queue->lock);
                        spin_unlock_bh(&queue->lock);
                        if (skb)
@@ -1713,8 +1710,7 @@ int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
        DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
        struct sk_buff *skb;
        unsigned int ulen, copied;
-       int peeked, peeking, off;
-       int err;
+       int off, err, peeking = flags & MSG_PEEK;
        int is_udplite = IS_UDPLITE(sk);
        bool checksum_valid = false;
 
@@ -1722,9 +1718,8 @@ int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
                return ip_recv_error(sk, msg, len, addr_len);
 
 try_again:
-       peeking = flags & MSG_PEEK;
        off = sk_peek_offset(sk, flags);
-       skb = __skb_recv_udp(sk, flags, noblock, &peeked, &off, &err);
+       skb = __skb_recv_udp(sk, flags, noblock, &off, &err);
        if (!skb)
                return err;
 
@@ -1762,7 +1757,7 @@ int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
        }
 
        if (unlikely(err)) {
-               if (!peeked) {
+               if (!peeking) {
                        atomic_inc(&sk->sk_drops);
                        UDP_INC_STATS(sock_net(sk),
                                      UDP_MIB_INERRORS, is_udplite);
@@ -1771,7 +1766,7 @@ int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
                return err;
        }
 
-       if (!peeked)
+       if (!peeking)
                UDP_INC_STATS(sock_net(sk),
                              UDP_MIB_INDATAGRAMS, is_udplite);
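
The UDP hunks above drop the "peeked" out-parameter and key the MIB accounting directly on peeking = flags & MSG_PEEK, since a peek can never dequeue. A userspace illustration of the flag's semantics (error checks omitted for brevity):

    /* MSG_PEEK leaves the datagram queued; only a normal receive consumes it. */
    #include <stdio.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <arpa/inet.h>
    #include <unistd.h>

    int main(void)
    {
            struct sockaddr_in addr = { .sin_family = AF_INET };
            socklen_t alen = sizeof(addr);
            char buf[16];
            int s = socket(AF_INET, SOCK_DGRAM, 0);

            addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
            bind(s, (struct sockaddr *)&addr, sizeof(addr));  /* ephemeral port */
            getsockname(s, (struct sockaddr *)&addr, &alen);

            sendto(s, "ping", 4, 0, (struct sockaddr *)&addr, sizeof(addr));
            printf("peek: %zd bytes\n", recv(s, buf, sizeof(buf), MSG_PEEK));
            printf("read: %zd bytes\n", recv(s, buf, sizeof(buf), 0));
            close(s);
            return 0;
    }
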
 
index d73a6d6652f60f8b81d47bb36766aa0d0329f3ce..72d19b1838ed5dbe14bfbc21b36ae1ff6b13600b 100644 (file)
@@ -97,8 +97,11 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
        xdst->u.rt.rt_flags = rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST |
                                              RTCF_LOCAL);
        xdst->u.rt.rt_type = rt->rt_type;
-       xdst->u.rt.rt_gateway = rt->rt_gateway;
-       xdst->u.rt.rt_uses_gateway = rt->rt_uses_gateway;
+       xdst->u.rt.rt_gw_family = rt->rt_gw_family;
+       if (rt->rt_gw_family == AF_INET)
+               xdst->u.rt.rt_gw4 = rt->rt_gw4;
+       else if (rt->rt_gw_family == AF_INET6)
+               xdst->u.rt.rt_gw6 = rt->rt_gw6;
        xdst->u.rt.rt_pmtu = rt->rt_pmtu;
        xdst->u.rt.rt_mtu_locked = rt->rt_mtu_locked;
        INIT_LIST_HEAD(&xdst->u.rt.rt_uncached);
index 2e8d1d2d8d3d50297466eb62c14b6c845bc8f655..340a0f06f97434d0c0621c1dea5477b4aa418821 100644 (file)
@@ -2421,7 +2421,7 @@ static struct fib6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
        for_each_fib6_node_rt_rcu(fn) {
                if (rt->fib6_nh.fib_nh_dev->ifindex != dev->ifindex)
                        continue;
-               if (no_gw && rt->fib6_nh.fib_nh_has_gw)
+               if (no_gw && rt->fib6_nh.fib_nh_gw_family)
                        continue;
                if ((rt->fib6_flags & flags) != flags)
                        continue;
index 945b66e3008fb42433cf30fad477fa04a095a94c..763a947e0d14d3455492b0a13c96153c6936535d 100644 (file)
@@ -144,43 +144,53 @@ static struct fib6_table *eafnosupport_fib6_get_table(struct net *net, u32 id)
        return NULL;
 }
 
-static struct fib6_info *
+static int
 eafnosupport_fib6_table_lookup(struct net *net, struct fib6_table *table,
-                              int oif, struct flowi6 *fl6, int flags)
+                              int oif, struct flowi6 *fl6,
+                              struct fib6_result *res, int flags)
 {
-       return NULL;
+       return -EAFNOSUPPORT;
 }
 
-static struct fib6_info *
+static int
 eafnosupport_fib6_lookup(struct net *net, int oif, struct flowi6 *fl6,
-                        int flags)
+                        struct fib6_result *res, int flags)
 {
-       return NULL;
+       return -EAFNOSUPPORT;
 }
 
-static struct fib6_info *
-eafnosupport_fib6_multipath_select(const struct net *net, struct fib6_info *f6i,
-                                  struct flowi6 *fl6, int oif,
-                                  const struct sk_buff *skb, int strict)
+static void
+eafnosupport_fib6_select_path(const struct net *net, struct fib6_result *res,
+                             struct flowi6 *fl6, int oif, bool have_oif_match,
+                             const struct sk_buff *skb, int strict)
 {
-       return f6i;
 }
 
 static u32
-eafnosupport_ip6_mtu_from_fib6(struct fib6_info *f6i, struct in6_addr *daddr,
-                              struct in6_addr *saddr)
+eafnosupport_ip6_mtu_from_fib6(const struct fib6_result *res,
+                              const struct in6_addr *daddr,
+                              const struct in6_addr *saddr)
 {
        return 0;
 }
 
+static int eafnosupport_fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
+                                    struct fib6_config *cfg, gfp_t gfp_flags,
+                                    struct netlink_ext_ack *extack)
+{
+       NL_SET_ERR_MSG(extack, "IPv6 support not enabled in kernel");
+       return -EAFNOSUPPORT;
+}
+
 const struct ipv6_stub *ipv6_stub __read_mostly = &(struct ipv6_stub) {
        .ipv6_dst_lookup   = eafnosupport_ipv6_dst_lookup,
        .ipv6_route_input  = eafnosupport_ipv6_route_input,
        .fib6_get_table    = eafnosupport_fib6_get_table,
        .fib6_table_lookup = eafnosupport_fib6_table_lookup,
        .fib6_lookup       = eafnosupport_fib6_lookup,
-       .fib6_multipath_select = eafnosupport_fib6_multipath_select,
+       .fib6_select_path  = eafnosupport_fib6_select_path,
        .ip6_mtu_from_fib6 = eafnosupport_ip6_mtu_from_fib6,
+       .fib6_nh_init      = eafnosupport_fib6_nh_init,
 };
 EXPORT_SYMBOL_GPL(ipv6_stub);
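
The lookup stubs above switch from returning a fib6_info pointer (NULL on failure) to returning an error code and filling a caller-provided fib6_result, so the CONFIG_IPV6=n stubs can fail cleanly with -EAFNOSUPPORT. A sketch of that calling convention with simplified stand-in types:

    #include <stdio.h>
    #include <errno.h>

    struct demo_info   { int table_id; };
    struct demo_result { struct demo_info *f6i; };

    /* The no-IPv6 stub: every lookup fails with a real error code
     * instead of a NULL that callers must interpret. */
    static int stub_fib6_lookup(struct demo_result *res, int flags)
    {
            (void)res;
            (void)flags;
            return -EAFNOSUPPORT;
    }

    int main(void)
    {
            struct demo_result res = { 0 };
            int err = stub_fib6_lookup(&res, 0);

            if (err)
                    printf("lookup failed: %d\n", err);
            return 0;
    }
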
 
index 1789bf99c4196e264f0885c018ff5653b302a5a3..d8587ca4fbeb4d05a5ccc43c8fdd31acba638a0f 100644 (file)
@@ -917,8 +917,10 @@ static const struct ipv6_stub ipv6_stub_impl = {
        .fib6_get_table    = fib6_get_table,
        .fib6_table_lookup = fib6_table_lookup,
        .fib6_lookup       = fib6_lookup,
-       .fib6_multipath_select = fib6_multipath_select,
+       .fib6_select_path  = fib6_select_path,
        .ip6_mtu_from_fib6 = ip6_mtu_from_fib6,
+       .fib6_nh_init      = fib6_nh_init,
+       .fib6_nh_release   = fib6_nh_release,
        .udpv6_encap_enable = udpv6_encap_enable,
        .ndisc_send_na = ndisc_send_na,
        .nd_tbl = &nd_tbl,
index f590446595d8b879de9ee384021311deed7c943a..ab5ac643bae8e6bca19f145d91c5576b073b6326 100644 (file)
@@ -61,16 +61,16 @@ unsigned int fib6_rules_seq_read(struct net *net)
 }
 
 /* called with rcu lock held; no reference taken on fib6_info */
-struct fib6_info *fib6_lookup(struct net *net, int oif, struct flowi6 *fl6,
-                             int flags)
+int fib6_lookup(struct net *net, int oif, struct flowi6 *fl6,
+               struct fib6_result *res, int flags)
 {
-       struct fib6_info *f6i;
        int err;
 
        if (net->ipv6.fib6_has_custom_rules) {
                struct fib_lookup_arg arg = {
                        .lookup_ptr = fib6_table_lookup,
                        .lookup_data = &oif,
+                       .result = res,
                        .flags = FIB_LOOKUP_NOREF,
                };
 
@@ -78,19 +78,15 @@ struct fib6_info *fib6_lookup(struct net *net, int oif, struct flowi6 *fl6,
 
                err = fib_rules_lookup(net->ipv6.fib6_rules_ops,
                                       flowi6_to_flowi(fl6), flags, &arg);
-               if (err)
-                       return ERR_PTR(err);
-
-               f6i = arg.result ? : net->ipv6.fib6_null_entry;
        } else {
-               f6i = fib6_table_lookup(net, net->ipv6.fib6_local_tbl,
-                                       oif, fl6, flags);
-               if (!f6i || f6i == net->ipv6.fib6_null_entry)
-                       f6i = fib6_table_lookup(net, net->ipv6.fib6_main_tbl,
-                                               oif, fl6, flags);
+               err = fib6_table_lookup(net, net->ipv6.fib6_local_tbl, oif,
+                                       fl6, res, flags);
+               if (err || res->f6i == net->ipv6.fib6_null_entry)
+                       err = fib6_table_lookup(net, net->ipv6.fib6_main_tbl,
+                                               oif, fl6, res, flags);
        }
 
-       return f6i;
+       return err;
 }
 
 struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
@@ -157,10 +153,10 @@ static int fib6_rule_saddr(struct net *net, struct fib_rule *rule, int flags,
 static int fib6_rule_action_alt(struct fib_rule *rule, struct flowi *flp,
                                int flags, struct fib_lookup_arg *arg)
 {
+       struct fib6_result *res = arg->result;
        struct flowi6 *flp6 = &flp->u.ip6;
        struct net *net = rule->fr_net;
        struct fib6_table *table;
-       struct fib6_info *f6i;
        int err = -EAGAIN, *oif;
        u32 tb_id;
 
@@ -182,14 +178,10 @@ static int fib6_rule_action_alt(struct fib_rule *rule, struct flowi *flp,
                return -EAGAIN;
 
        oif = (int *)arg->lookup_data;
-       f6i = fib6_table_lookup(net, table, *oif, flp6, flags);
-       if (f6i != net->ipv6.fib6_null_entry) {
+       err = fib6_table_lookup(net, table, *oif, flp6, res, flags);
+       if (!err && res->f6i != net->ipv6.fib6_null_entry)
                err = fib6_rule_saddr(net, rule, flags, flp6,
-                                     fib6_info_nh_dev(f6i));
-
-               if (likely(!err))
-                       arg->result = f6i;
-       }
+                                     res->nh->fib_nh_dev);
 
        return err;
 }
index 79d2e43c05c5e792b1498c4bf5f73756252e2c7d..5fc1f4e0c0cf0d3dd403c2dcaf291ea9c096d235 100644 (file)
@@ -417,6 +417,7 @@ int ila_xlat_nl_cmd_flush(struct sk_buff *skb, struct genl_info *info)
 
 done:
        rhashtable_walk_stop(&iter);
+       rhashtable_walk_exit(&iter);
        return ret;
 }
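
The one-line ila_xlat fix above adds the missing rhashtable_walk_exit(): rhashtable_walk_enter() registers the iterator on the table's walker list, and only walk_exit() removes it, so stopping without exiting leaks the registration. The shape of the protocol, with stubs standing in for the rhashtable API:

    #include <stdio.h>

    struct demo_iter { int registered; };

    static void walk_enter(struct demo_iter *it) { it->registered = 1; } /* join walker list  */
    static void walk_start(struct demo_iter *it) { (void)it; }           /* take RCU lock     */
    static void walk_stop(struct demo_iter *it)  { (void)it; }           /* drop RCU lock     */
    static void walk_exit(struct demo_iter *it)  { it->registered = 0; } /* leave walker list */

    int main(void)
    {
            struct demo_iter it;

            walk_enter(&it);
            walk_start(&it);
            /* ... visit entries, possibly restarting ... */
            walk_stop(&it);
            walk_exit(&it); /* skipping this is the leak the fix closes */
            printf("iterator released: %d\n", !it.registered);
            return 0;
    }
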
 
index 8c00609a1513f7ff247c6eb02d0e36822e6e861d..b47e15df9769ccb2919e6ec0de5edd93f2ae518f 100644 (file)
@@ -354,10 +354,11 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
 }
 
 /* called with rcu lock held; no reference taken on fib6_info */
-struct fib6_info *fib6_lookup(struct net *net, int oif, struct flowi6 *fl6,
-                             int flags)
+int fib6_lookup(struct net *net, int oif, struct flowi6 *fl6,
+               struct fib6_result *res, int flags)
 {
-       return fib6_table_lookup(net, net->ipv6.fib6_main_tbl, oif, fl6, flags);
+       return fib6_table_lookup(net, net->ipv6.fib6_main_tbl, oif, fl6,
+                                res, flags);
 }
 
 static void __net_init fib6_tables_init(struct net *net)
@@ -2304,7 +2305,7 @@ static int ipv6_route_seq_show(struct seq_file *seq, void *v)
 #else
        seq_puts(seq, "00000000000000000000000000000000 00 ");
 #endif
-       if (rt->fib6_nh.fib_nh_has_gw) {
+       if (rt->fib6_nh.fib_nh_gw_family) {
                flags |= RTF_GATEWAY;
                seq_printf(seq, "%pi6", &rt->fib6_nh.fib_nh_gw6);
        } else {
index b32c95f0212809006455cb79768f96bd1c516994..655e46b227f9eb99e43369ffb96a411bd662eadb 100644 (file)
@@ -525,10 +525,10 @@ static int ip6gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
 }
 
 static int ip6erspan_rcv(struct sk_buff *skb,
-                        struct tnl_ptk_info *tpi)
+                        struct tnl_ptk_info *tpi,
+                        int gre_hdr_len)
 {
        struct erspan_base_hdr *ershdr;
-       struct erspan_metadata *pkt_md;
        const struct ipv6hdr *ipv6h;
        struct erspan_md2 *md2;
        struct ip6_tnl *tunnel;
@@ -547,18 +547,16 @@ static int ip6erspan_rcv(struct sk_buff *skb,
                if (unlikely(!pskb_may_pull(skb, len)))
                        return PACKET_REJECT;
 
-               ershdr = (struct erspan_base_hdr *)skb->data;
-               pkt_md = (struct erspan_metadata *)(ershdr + 1);
-
                if (__iptunnel_pull_header(skb, len,
                                           htons(ETH_P_TEB),
                                           false, false) < 0)
                        return PACKET_REJECT;
 
                if (tunnel->parms.collect_md) {
+                       struct erspan_metadata *pkt_md, *md;
                        struct metadata_dst *tun_dst;
                        struct ip_tunnel_info *info;
-                       struct erspan_metadata *md;
+                       unsigned char *gh;
                        __be64 tun_id;
                        __be16 flags;
 
@@ -571,6 +569,14 @@ static int ip6erspan_rcv(struct sk_buff *skb,
                        if (!tun_dst)
                                return PACKET_REJECT;
 
+                       /* skb can be uncloned in __iptunnel_pull_header, so
+                        * old pkt_md is no longer valid and we need to reset
+                        * it
+                        */
+                       gh = skb_network_header(skb) +
+                            skb_network_header_len(skb);
+                       pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
+                                                           sizeof(*ershdr));
                        info = &tun_dst->u.tun_info;
                        md = ip_tunnel_info_opts(info);
                        md->version = ver;
@@ -607,7 +613,7 @@ static int gre_rcv(struct sk_buff *skb)
 
        if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
                     tpi.proto == htons(ETH_P_ERSPAN2))) {
-               if (ip6erspan_rcv(skb, &tpi) == PACKET_RCVD)
+               if (ip6erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
                        return 0;
                goto out;
        }
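
The erspan fix above and the ip6_fragment hunk further below guard against the same hazard: a helper (__iptunnel_pull_header() here, skb_checksum_help() there) may reallocate or unclone the buffer, invalidating any raw pointer taken earlier, so the pointer must be recomputed from the new base plus a saved offset. A generic userspace analogue:

    /* Save an offset, not a pointer, across a call that may move the buffer. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
            char *buf = malloc(16);
            char *tmp, *field;
            size_t off;

            if (!buf)
                    return 1;
            strcpy(buf, "hdr:payload");
            field = buf + 4;           /* points at "payload" */
            off = field - buf;         /* remember the offset instead */

            tmp = realloc(buf, 4096);  /* may move the block: 'field' is stale */
            if (!tmp) {
                    free(buf);
                    return 1;
            }
            buf = tmp;
            field = buf + off;         /* recompute from the new base */

            printf("%s\n", field);     /* payload */
            free(buf);
            return 0;
    }
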
index edbd12067170bc77332d57a04c96812d9702520b..adef2236abe2e767602a9d4afbf23fc6db5750dd 100644 (file)
@@ -117,7 +117,7 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
                neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
        if (!IS_ERR(neigh)) {
                sock_confirm_neigh(skb, neigh);
-               ret = neigh_output(neigh, skb);
+               ret = neigh_output(neigh, skb, false);
                rcu_read_unlock_bh();
                return ret;
        }
@@ -601,7 +601,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
                                inet6_sk(skb->sk) : NULL;
        struct ipv6hdr *tmp_hdr;
        struct frag_hdr *fh;
-       unsigned int mtu, hlen, left, len;
+       unsigned int mtu, hlen, left, len, nexthdr_offset;
        int hroom, troom;
        __be32 frag_id;
        int ptr, offset = 0, err = 0;
@@ -612,6 +612,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
                goto fail;
        hlen = err;
        nexthdr = *prevhdr;
+       nexthdr_offset = prevhdr - skb_network_header(skb);
 
        mtu = ip6_skb_dst_mtu(skb);
 
@@ -646,6 +647,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
            (err = skb_checksum_help(skb)))
                goto fail;
 
+       prevhdr = skb_network_header(skb) + nexthdr_offset;
        hroom = LL_RESERVED_SPACE(rt->dst.dev);
        if (skb_has_frag_list(skb)) {
                unsigned int first_len = skb_pagelen(skb);
index 0c6403cf8b5226fbe4bf2e4506b3816b30973b0b..ade1390c63488a60b405ca70052b3493fecc67d5 100644 (file)
@@ -627,7 +627,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
                                           eiph->daddr, eiph->saddr, 0, 0,
                                           IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
-               if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL) {
+               if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL6) {
                        if (!IS_ERR(rt))
                                ip_rt_put(rt);
                        goto out;
@@ -636,7 +636,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        } else {
                if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
                                   skb2->dev) ||
-                   skb_dst(skb2)->dev->type != ARPHRD_TUNNEL)
+                   skb_dst(skb2)->dev->type != ARPHRD_TUNNEL6)
                        goto out;
        }
 
index e4dd57976737a81549861c8ff5d9e9dcb7793a8d..4e69847ed5bef4438499a800546e22fbed2962a7 100644 (file)
@@ -355,7 +355,6 @@ static const struct rhashtable_params ip6mr_rht_params = {
        .key_offset = offsetof(struct mfc6_cache, cmparg),
        .key_len = sizeof(struct mfc6_cache_cmp_arg),
        .nelem_hint = 3,
-       .locks_mul = 1,
        .obj_cmpfn = ip6mr_hash_cmp,
        .automatic_shrinking = true,
 };
index 66c8b294e02bbacbd673e84bfe8081e67281c1f8..4c8e2ea8bf193ac006a7dcb4e7e94dff2a394b0e 100644 (file)
@@ -77,6 +77,8 @@ static u32 ndisc_hash(const void *pkey,
                      const struct net_device *dev,
                      __u32 *hash_rnd);
 static bool ndisc_key_eq(const struct neighbour *neigh, const void *pkey);
+static bool ndisc_allow_add(const struct net_device *dev,
+                           struct netlink_ext_ack *extack);
 static int ndisc_constructor(struct neighbour *neigh);
 static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb);
 static void ndisc_error_report(struct neighbour *neigh, struct sk_buff *skb);
@@ -117,6 +119,7 @@ struct neigh_table nd_tbl = {
        .pconstructor = pndisc_constructor,
        .pdestructor =  pndisc_destructor,
        .proxy_redo =   pndisc_redo,
+       .allow_add  =   ndisc_allow_add,
        .id =           "ndisc_cache",
        .parms = {
                .tbl                    = &nd_tbl,
@@ -392,6 +395,20 @@ static void pndisc_destructor(struct pneigh_entry *n)
        ipv6_dev_mc_dec(dev, &maddr);
 }
 
+/* called with rtnl held */
+static bool ndisc_allow_add(const struct net_device *dev,
+                           struct netlink_ext_ack *extack)
+{
+       struct inet6_dev *idev = __in6_dev_get(dev);
+
+       if (!idev || idev->cnf.disable_ipv6) {
+               NL_SET_ERR_MSG(extack, "IPv6 is disabled on this device");
+               return false;
+       }
+
+       return true;
+}
+
 static struct sk_buff *ndisc_alloc_skb(struct net_device *dev,
                                       int len)
 {
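
ndisc_allow_add() above is wired into nd_tbl as the new allow_add hook, letting the neighbour table veto userspace entry creation when IPv6 is disabled on the device and report why via extack. A minimal model of such a veto callback in an ops table (all names illustrative, not the neigh_table API):

    #include <stdio.h>
    #include <stdbool.h>

    struct demo_dev { bool ipv6_disabled; };

    struct demo_tbl_ops {
            bool (*allow_add)(const struct demo_dev *dev, const char **errmsg);
    };

    static bool demo_allow_add(const struct demo_dev *dev, const char **errmsg)
    {
            if (dev->ipv6_disabled) {
                    *errmsg = "IPv6 is disabled on this device";
                    return false;
            }
            return true;
    }

    static int add_entry(const struct demo_tbl_ops *ops, const struct demo_dev *dev)
    {
            const char *errmsg = NULL;

            if (ops->allow_add && !ops->allow_add(dev, &errmsg)) {
                    printf("rejected: %s\n", errmsg);
                    return -1;
            }
            printf("entry added\n");
            return 0;
    }

    int main(void)
    {
            struct demo_tbl_ops ops = { .allow_add = demo_allow_add };
            struct demo_dev dev = { .ipv6_disabled = true };

            add_entry(&ops, &dev);  /* rejected: IPv6 is disabled on this device */
            return 0;
    }
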
index ddc99a1653aa790352f9c1a4b11edcb61abbcd1a..086fc669279ecdf71d4724436aaa1c54796cda4c 100644 (file)
@@ -23,14 +23,6 @@ config NF_TABLES_IPV6
 
 if NF_TABLES_IPV6
 
-config NFT_CHAIN_ROUTE_IPV6
-       tristate "IPv6 nf_tables route chain support"
-       help
-         This option enables the "route" chain for IPv6 in nf_tables. This
-         chain type is used to force packet re-routing after mangling header
-         fields such as the source, destination, flowlabel, hop-limit and
-         the packet mark.
-
 config NFT_REJECT_IPV6
        select NF_REJECT_IPV6
        default NFT_REJECT
@@ -278,15 +270,10 @@ if IP6_NF_NAT
 
 config IP6_NF_TARGET_MASQUERADE
        tristate "MASQUERADE target support"
-       select NF_NAT_MASQUERADE
+       select NETFILTER_XT_TARGET_MASQUERADE
        help
-         Masquerading is a special case of NAT: all outgoing connections are
-         changed to seem to come from a particular interface's address, and
-         if the interface goes down, those connections are lost.  This is
-         only useful for dialup accounts with dynamic IP address (ie. your IP
-         address will be different on next dialup).
-
-         To compile it as a module, choose M here.  If unsure, say N.
+         This is a backwards-compat option for the user's convenience
+         (e.g. when running oldconfig). It selects NETFILTER_XT_TARGET_MASQUERADE.
 
 config IP6_NF_TARGET_NPT
        tristate "NPT (Network Prefix translation) target support"
index 3853c648ebaaa5fe0c5fad8d3fb36bab5e3a5977..731a74c60dca36c64c7a71872e63d4123c51ff05 100644 (file)
@@ -27,7 +27,6 @@ obj-$(CONFIG_NF_REJECT_IPV6) += nf_reject_ipv6.o
 obj-$(CONFIG_NF_DUP_IPV6) += nf_dup_ipv6.o
 
 # nf_tables
-obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV6) += nft_chain_route_ipv6.o
 obj-$(CONFIG_NFT_REJECT_IPV6) += nft_reject_ipv6.o
 obj-$(CONFIG_NFT_DUP_IPV6) += nft_dup_ipv6.o
 obj-$(CONFIG_NFT_FIB_IPV6) += nft_fib_ipv6.o
@@ -47,7 +46,6 @@ obj-$(CONFIG_IP6_NF_MATCH_RT) += ip6t_rt.o
 obj-$(CONFIG_IP6_NF_MATCH_SRH) += ip6t_srh.o
 
 # targets
-obj-$(CONFIG_IP6_NF_TARGET_MASQUERADE) += ip6t_MASQUERADE.o
 obj-$(CONFIG_IP6_NF_TARGET_NPT) += ip6t_NPT.o
 obj-$(CONFIG_IP6_NF_TARGET_REJECT) += ip6t_REJECT.o
 obj-$(CONFIG_IP6_NF_TARGET_SYNPROXY) += ip6t_SYNPROXY.o
diff --git a/net/ipv6/netfilter/ip6t_MASQUERADE.c b/net/ipv6/netfilter/ip6t_MASQUERADE.c
deleted file mode 100644 (file)
index 29c7f19..0000000
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright (c) 2011 Patrick McHardy <kaber@trash.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Based on Rusty Russell's IPv6 MASQUERADE target. Development of IPv6
- * NAT funded by Astaro.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/ipv6.h>
-#include <linux/netfilter.h>
-#include <linux/netfilter_ipv6.h>
-#include <linux/netfilter/x_tables.h>
-#include <net/netfilter/nf_nat.h>
-#include <net/addrconf.h>
-#include <net/ipv6.h>
-#include <net/netfilter/ipv6/nf_nat_masquerade.h>
-
-static unsigned int
-masquerade_tg6(struct sk_buff *skb, const struct xt_action_param *par)
-{
-       return nf_nat_masquerade_ipv6(skb, par->targinfo, xt_out(par));
-}
-
-static int masquerade_tg6_checkentry(const struct xt_tgchk_param *par)
-{
-       const struct nf_nat_range2 *range = par->targinfo;
-
-       if (range->flags & NF_NAT_RANGE_MAP_IPS)
-               return -EINVAL;
-       return nf_ct_netns_get(par->net, par->family);
-}
-
-static void masquerade_tg6_destroy(const struct xt_tgdtor_param *par)
-{
-       nf_ct_netns_put(par->net, par->family);
-}
-
-static struct xt_target masquerade_tg6_reg __read_mostly = {
-       .name           = "MASQUERADE",
-       .family         = NFPROTO_IPV6,
-       .checkentry     = masquerade_tg6_checkentry,
-       .destroy        = masquerade_tg6_destroy,
-       .target         = masquerade_tg6,
-       .targetsize     = sizeof(struct nf_nat_range),
-       .table          = "nat",
-       .hooks          = 1 << NF_INET_POST_ROUTING,
-       .me             = THIS_MODULE,
-};
-
-static int __init masquerade_tg6_init(void)
-{
-       int err;
-
-       err = xt_register_target(&masquerade_tg6_reg);
-       if (err)
-               return err;
-
-       err = nf_nat_masquerade_ipv6_register_notifier();
-       if (err)
-               xt_unregister_target(&masquerade_tg6_reg);
-
-       return err;
-}
-static void __exit masquerade_tg6_exit(void)
-{
-       nf_nat_masquerade_ipv6_unregister_notifier();
-       xt_unregister_target(&masquerade_tg6_reg);
-}
-
-module_init(masquerade_tg6_init);
-module_exit(masquerade_tg6_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
-MODULE_DESCRIPTION("Xtables: automatic address SNAT");
diff --git a/net/ipv6/netfilter/nft_chain_route_ipv6.c b/net/ipv6/netfilter/nft_chain_route_ipv6.c
deleted file mode 100644 (file)
index da3f1f8..0000000
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
- * Copyright (c) 2012 Pablo Neira Ayuso <pablo@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Development of this code funded by Astaro AG (http://www.astaro.com/)
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/skbuff.h>
-#include <linux/netlink.h>
-#include <linux/netfilter.h>
-#include <linux/netfilter_ipv6.h>
-#include <linux/netfilter/nfnetlink.h>
-#include <linux/netfilter/nf_tables.h>
-#include <net/netfilter/nf_tables.h>
-#include <net/netfilter/nf_tables_ipv6.h>
-#include <net/route.h>
-
-static unsigned int nf_route_table_hook(void *priv,
-                                       struct sk_buff *skb,
-                                       const struct nf_hook_state *state)
-{
-       unsigned int ret;
-       struct nft_pktinfo pkt;
-       struct in6_addr saddr, daddr;
-       u_int8_t hop_limit;
-       u32 mark, flowlabel;
-       int err;
-
-       nft_set_pktinfo(&pkt, skb, state);
-       nft_set_pktinfo_ipv6(&pkt, skb);
-
-       /* save source/dest address, mark, hoplimit, flowlabel, priority */
-       memcpy(&saddr, &ipv6_hdr(skb)->saddr, sizeof(saddr));
-       memcpy(&daddr, &ipv6_hdr(skb)->daddr, sizeof(daddr));
-       mark = skb->mark;
-       hop_limit = ipv6_hdr(skb)->hop_limit;
-
-       /* flowlabel and prio (includes version, which shouldn't change either */
-       flowlabel = *((u32 *)ipv6_hdr(skb));
-
-       ret = nft_do_chain(&pkt, priv);
-       if (ret != NF_DROP && ret != NF_STOLEN &&
-           (memcmp(&ipv6_hdr(skb)->saddr, &saddr, sizeof(saddr)) ||
-            memcmp(&ipv6_hdr(skb)->daddr, &daddr, sizeof(daddr)) ||
-            skb->mark != mark ||
-            ipv6_hdr(skb)->hop_limit != hop_limit ||
-            flowlabel != *((u_int32_t *)ipv6_hdr(skb)))) {
-               err = ip6_route_me_harder(state->net, skb);
-               if (err < 0)
-                       ret = NF_DROP_ERR(err);
-       }
-
-       return ret;
-}
-
-static const struct nft_chain_type nft_chain_route_ipv6 = {
-       .name           = "route",
-       .type           = NFT_CHAIN_T_ROUTE,
-       .family         = NFPROTO_IPV6,
-       .owner          = THIS_MODULE,
-       .hook_mask      = (1 << NF_INET_LOCAL_OUT),
-       .hooks          = {
-               [NF_INET_LOCAL_OUT]     = nf_route_table_hook,
-       },
-};
-
-static int __init nft_chain_route_init(void)
-{
-       nft_register_chain_type(&nft_chain_route_ipv6);
-
-       return 0;
-}
-
-static void __exit nft_chain_route_exit(void)
-{
-       nft_unregister_chain_type(&nft_chain_route_ipv6);
-}
-
-module_init(nft_chain_route_init);
-module_exit(nft_chain_route_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
-MODULE_ALIAS_NFT_CHAIN(AF_INET6, "route");
index e0ee30cbd0795a8eaf22017ed0aec98a3bc68aa9..e8c73b7782cdc5a2c4ca5ef50aca4c8d8556c7be 100644 (file)
@@ -102,14 +102,15 @@ static void               ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
                                           struct sk_buff *skb, u32 mtu);
 static void            rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
                                        struct sk_buff *skb);
-static int rt6_score_route(struct fib6_info *rt, int oif, int strict);
+static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
+                          int strict);
 static size_t rt6_nlmsg_size(struct fib6_info *rt);
 static int rt6_fill_node(struct net *net, struct sk_buff *skb,
                         struct fib6_info *rt, struct dst_entry *dst,
                         struct in6_addr *dest, struct in6_addr *src,
                         int iif, int type, u32 portid, u32 seq,
                         unsigned int flags);
-static struct rt6_info *rt6_find_cached_rt(struct fib6_info *rt,
+static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res,
                                           struct in6_addr *daddr,
                                           struct in6_addr *saddr);
 
@@ -427,13 +428,15 @@ static bool rt6_check_expired(const struct rt6_info *rt)
        return false;
 }
 
-struct fib6_info *fib6_multipath_select(const struct net *net,
-                                       struct fib6_info *match,
-                                       struct flowi6 *fl6, int oif,
-                                       const struct sk_buff *skb,
-                                       int strict)
+void fib6_select_path(const struct net *net, struct fib6_result *res,
+                     struct flowi6 *fl6, int oif, bool have_oif_match,
+                     const struct sk_buff *skb, int strict)
 {
        struct fib6_info *sibling, *next_sibling;
+       struct fib6_info *match = res->f6i;
+
+       if (!match->fib6_nsiblings || have_oif_match)
+               goto out;
 
        /* We might have already computed the hash for ICMPv6 errors. In such
         * case it will always be non-zero. Otherwise now is the time to do it.
@@ -442,60 +445,88 @@ struct fib6_info *fib6_multipath_select(const struct net *net,
                fl6->mp_hash = rt6_multipath_hash(net, fl6, skb, NULL);
 
        if (fl6->mp_hash <= atomic_read(&match->fib6_nh.fib_nh_upper_bound))
-               return match;
+               goto out;
 
        list_for_each_entry_safe(sibling, next_sibling, &match->fib6_siblings,
                                 fib6_siblings) {
+               const struct fib6_nh *nh = &sibling->fib6_nh;
                int nh_upper_bound;
 
-               nh_upper_bound = atomic_read(&sibling->fib6_nh.fib_nh_upper_bound);
+               nh_upper_bound = atomic_read(&nh->fib_nh_upper_bound);
                if (fl6->mp_hash > nh_upper_bound)
                        continue;
-               if (rt6_score_route(sibling, oif, strict) < 0)
+               if (rt6_score_route(nh, sibling->fib6_flags, oif, strict) < 0)
                        break;
                match = sibling;
                break;
        }
 
-       return match;
+out:
+       res->f6i = match;
+       res->nh = &match->fib6_nh;
 }
 
 /*
  *     Route lookup. rcu_read_lock() should be held.
  */
 
-static inline struct fib6_info *rt6_device_match(struct net *net,
-                                                struct fib6_info *rt,
-                                                   const struct in6_addr *saddr,
-                                                   int oif,
-                                                   int flags)
+static bool __rt6_device_match(struct net *net, const struct fib6_nh *nh,
+                              const struct in6_addr *saddr, int oif, int flags)
 {
-       struct fib6_info *sprt;
+       const struct net_device *dev;
 
-       if (!oif && ipv6_addr_any(saddr) &&
-           !(rt->fib6_nh.fib_nh_flags & RTNH_F_DEAD))
-               return rt;
+       if (nh->fib_nh_flags & RTNH_F_DEAD)
+               return false;
+
+       dev = nh->fib_nh_dev;
+       if (oif) {
+               if (dev->ifindex == oif)
+                       return true;
+       } else {
+               if (ipv6_chk_addr(net, saddr, dev,
+                                 flags & RT6_LOOKUP_F_IFACE))
+                       return true;
+       }
 
-       for (sprt = rt; sprt; sprt = rcu_dereference(sprt->fib6_next)) {
-               const struct net_device *dev = sprt->fib6_nh.fib_nh_dev;
+       return false;
+}
 
-               if (sprt->fib6_nh.fib_nh_flags & RTNH_F_DEAD)
-                       continue;
+static void rt6_device_match(struct net *net, struct fib6_result *res,
+                            const struct in6_addr *saddr, int oif, int flags)
+{
+       struct fib6_info *f6i = res->f6i;
+       struct fib6_info *spf6i;
+       struct fib6_nh *nh;
 
-               if (oif) {
-                       if (dev->ifindex == oif)
-                               return sprt;
-               } else {
-                       if (ipv6_chk_addr(net, saddr, dev,
-                                         flags & RT6_LOOKUP_F_IFACE))
-                               return sprt;
+       if (!oif && ipv6_addr_any(saddr)) {
+               nh = &f6i->fib6_nh;
+               if (!(nh->fib_nh_flags & RTNH_F_DEAD))
+                       goto out;
+       }
+
+       for (spf6i = f6i; spf6i; spf6i = rcu_dereference(spf6i->fib6_next)) {
+               nh = &spf6i->fib6_nh;
+               if (__rt6_device_match(net, nh, saddr, oif, flags)) {
+                       res->f6i = spf6i;
+                       goto out;
                }
        }
 
-       if (oif && flags & RT6_LOOKUP_F_IFACE)
-               return net->ipv6.fib6_null_entry;
+       if (oif && flags & RT6_LOOKUP_F_IFACE) {
+               res->f6i = net->ipv6.fib6_null_entry;
+               nh = &res->f6i->fib6_nh;
+               goto out;
+       }
 
-       return rt->fib6_nh.fib_nh_flags & RTNH_F_DEAD ? net->ipv6.fib6_null_entry : rt;
+       nh = &f6i->fib6_nh;
+       if (nh->fib_nh_flags & RTNH_F_DEAD) {
+               res->f6i = net->ipv6.fib6_null_entry;
+               nh = &res->f6i->fib6_nh;
+       }
+out:
+       res->nh = nh;
+       res->fib6_type = res->f6i->fib6_type;
+       res->fib6_flags = res->f6i->fib6_flags;
 }
 
 #ifdef CONFIG_IPV6_ROUTER_PREF
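
For reference, the fib6_result these hunks populate bundles the chosen route and nexthop. A minimal sketch, inferred only from the fields this patch touches (the authoritative definition lives in include/net/ip6_fib.h):

	struct fib6_result {
		struct fib6_nh		*nh;		/* selected nexthop */
		struct fib6_info	*f6i;		/* matching route entry */
		u32			fib6_flags;	/* cached f6i->fib6_flags */
		u8			fib6_type;	/* cached f6i->fib6_type */
	};

Caching fib6_flags and fib6_type alongside the pointers lets later consumers such as ip6_rt_init_dst() work from the result alone instead of re-dereferencing f6i.
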
@@ -517,7 +548,7 @@ static void rt6_probe_deferred(struct work_struct *w)
        kfree(work);
 }
 
-static void rt6_probe(struct fib6_info *rt)
+static void rt6_probe(struct fib6_nh *fib6_nh)
 {
        struct __rt6_probe_work *work = NULL;
        const struct in6_addr *nh_gw;
@@ -533,11 +564,11 @@ static void rt6_probe(struct fib6_info *rt)
         * Router Reachability Probe MUST be rate-limited
         * to no more than one per minute.
         */
-       if (!rt || !rt->fib6_nh.fib_nh_has_gw)
+       if (!fib6_nh->fib_nh_gw_family)
                return;
 
-       nh_gw = &rt->fib6_nh.fib_nh_gw6;
-       dev = rt->fib6_nh.fib_nh_dev;
+       nh_gw = &fib6_nh->fib_nh_gw6;
+       dev = fib6_nh->fib_nh_dev;
        rcu_read_lock_bh();
        idev = __in6_dev_get(dev);
        neigh = __ipv6_neigh_lookup_noref(dev, nh_gw);
@@ -554,13 +585,13 @@ static void rt6_probe(struct fib6_info *rt)
                                __neigh_set_probe_once(neigh);
                }
                write_unlock(&neigh->lock);
-       } else if (time_after(jiffies, rt->last_probe +
+       } else if (time_after(jiffies, fib6_nh->last_probe +
                                       idev->cnf.rtr_probe_interval)) {
                work = kmalloc(sizeof(*work), GFP_ATOMIC);
        }
 
        if (work) {
-               rt->last_probe = jiffies;
+               fib6_nh->last_probe = jiffies;
                INIT_WORK(&work->work, rt6_probe_deferred);
                work->target = *nh_gw;
                dev_hold(dev);
@@ -572,7 +603,7 @@ static void rt6_probe(struct fib6_info *rt)
        rcu_read_unlock_bh();
 }
 #else
-static inline void rt6_probe(struct fib6_info *rt)
+static inline void rt6_probe(struct fib6_nh *fib6_nh)
 {
 }
 #endif
@@ -580,27 +611,14 @@ static inline void rt6_probe(struct fib6_info *rt)
 /*
  * Default Router Selection (RFC 2461 6.3.6)
  */
-static inline int rt6_check_dev(struct fib6_info *rt, int oif)
-{
-       const struct net_device *dev = rt->fib6_nh.fib_nh_dev;
-
-       if (!oif || dev->ifindex == oif)
-               return 2;
-       return 0;
-}
-
-static inline enum rt6_nud_state rt6_check_neigh(struct fib6_info *rt)
+static enum rt6_nud_state rt6_check_neigh(const struct fib6_nh *fib6_nh)
 {
        enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;
        struct neighbour *neigh;
 
-       if (rt->fib6_flags & RTF_NONEXTHOP ||
-           !rt->fib6_nh.fib_nh_has_gw)
-               return RT6_NUD_SUCCEED;
-
        rcu_read_lock_bh();
-       neigh = __ipv6_neigh_lookup_noref(rt->fib6_nh.fib_nh_dev,
-                                         &rt->fib6_nh.fib_nh_gw6);
+       neigh = __ipv6_neigh_lookup_noref(fib6_nh->fib_nh_dev,
+                                         &fib6_nh->fib_nh_gw6);
        if (neigh) {
                read_lock(&neigh->lock);
                if (neigh->nud_state & NUD_VALID)
@@ -621,43 +639,44 @@ static inline enum rt6_nud_state rt6_check_neigh(struct fib6_info *rt)
        return ret;
 }
 
-static int rt6_score_route(struct fib6_info *rt, int oif, int strict)
+static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
+                          int strict)
 {
-       int m;
+       int m = 0;
+
+       if (!oif || nh->fib_nh_dev->ifindex == oif)
+               m = 2;
 
-       m = rt6_check_dev(rt, oif);
        if (!m && (strict & RT6_LOOKUP_F_IFACE))
                return RT6_NUD_FAIL_HARD;
 #ifdef CONFIG_IPV6_ROUTER_PREF
-       m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->fib6_flags)) << 2;
+       m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(fib6_flags)) << 2;
 #endif
-       if (strict & RT6_LOOKUP_F_REACHABLE) {
-               int n = rt6_check_neigh(rt);
+       if ((strict & RT6_LOOKUP_F_REACHABLE) &&
+           !(fib6_flags & RTF_NONEXTHOP) && nh->fib_nh_gw_family) {
+               int n = rt6_check_neigh(nh);
                if (n < 0)
                        return n;
        }
        return m;
 }
 
-static struct fib6_info *find_match(struct fib6_info *rt, int oif, int strict,
-                                  int *mpri, struct fib6_info *match,
-                                  bool *do_rr)
+static bool find_match(struct fib6_nh *nh, u32 fib6_flags,
+                      int oif, int strict, int *mpri, bool *do_rr)
 {
-       int m;
        bool match_do_rr = false;
+       bool rc = false;
+       int m;
 
-       if (rt->fib6_nh.fib_nh_flags & RTNH_F_DEAD)
+       if (nh->fib_nh_flags & RTNH_F_DEAD)
                goto out;
 
-       if (ip6_ignore_linkdown(rt->fib6_nh.fib_nh_dev) &&
-           rt->fib6_nh.fib_nh_flags & RTNH_F_LINKDOWN &&
+       if (ip6_ignore_linkdown(nh->fib_nh_dev) &&
+           nh->fib_nh_flags & RTNH_F_LINKDOWN &&
            !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
                goto out;
 
-       if (fib6_check_expired(rt))
-               goto out;
-
-       m = rt6_score_route(rt, oif, strict);
+       m = rt6_score_route(nh, fib6_flags, oif, strict);
        if (m == RT6_NUD_FAIL_DO_RR) {
                match_do_rr = true;
                m = 0; /* lowest valid score */
@@ -666,67 +685,82 @@ static struct fib6_info *find_match(struct fib6_info *rt, int oif, int strict,
        }
 
        if (strict & RT6_LOOKUP_F_REACHABLE)
-               rt6_probe(rt);
+               rt6_probe(nh);
 
        /* note that m can be RT6_NUD_FAIL_PROBE at this point */
        if (m > *mpri) {
                *do_rr = match_do_rr;
                *mpri = m;
-               match = rt;
+               rc = true;
        }
 out:
-       return match;
+       return rc;
 }
 
-static struct fib6_info *find_rr_leaf(struct fib6_node *fn,
-                                    struct fib6_info *leaf,
-                                    struct fib6_info *rr_head,
-                                    u32 metric, int oif, int strict,
-                                    bool *do_rr)
+static void __find_rr_leaf(struct fib6_info *f6i_start,
+                          struct fib6_info *nomatch, u32 metric,
+                          struct fib6_result *res, struct fib6_info **cont,
+                          int oif, int strict, bool *do_rr, int *mpri)
 {
-       struct fib6_info *rt, *match, *cont;
-       int mpri = -1;
+       struct fib6_info *f6i;
 
-       match = NULL;
-       cont = NULL;
-       for (rt = rr_head; rt; rt = rcu_dereference(rt->fib6_next)) {
-               if (rt->fib6_metric != metric) {
-                       cont = rt;
-                       break;
+       for (f6i = f6i_start;
+            f6i && f6i != nomatch;
+            f6i = rcu_dereference(f6i->fib6_next)) {
+               struct fib6_nh *nh;
+
+               if (cont && f6i->fib6_metric != metric) {
+                       *cont = f6i;
+                       return;
                }
 
-               match = find_match(rt, oif, strict, &mpri, match, do_rr);
-       }
+               if (fib6_check_expired(f6i))
+                       continue;
 
-       for (rt = leaf; rt && rt != rr_head;
-            rt = rcu_dereference(rt->fib6_next)) {
-               if (rt->fib6_metric != metric) {
-                       cont = rt;
-                       break;
+               nh = &f6i->fib6_nh;
+               if (find_match(nh, f6i->fib6_flags, oif, strict, mpri, do_rr)) {
+                       res->f6i = f6i;
+                       res->nh = nh;
+                       res->fib6_flags = f6i->fib6_flags;
+                       res->fib6_type = f6i->fib6_type;
                }
-
-               match = find_match(rt, oif, strict, &mpri, match, do_rr);
        }
+}
+
+static void find_rr_leaf(struct fib6_node *fn, struct fib6_info *leaf,
+                        struct fib6_info *rr_head, int oif, int strict,
+                        bool *do_rr, struct fib6_result *res)
+{
+       u32 metric = rr_head->fib6_metric;
+       struct fib6_info *cont = NULL;
+       int mpri = -1;
+
+       __find_rr_leaf(rr_head, NULL, metric, res, &cont,
+                      oif, strict, do_rr, &mpri);
 
-       if (match || !cont)
-               return match;
+       __find_rr_leaf(leaf, rr_head, metric, res, &cont,
+                      oif, strict, do_rr, &mpri);
 
-       for (rt = cont; rt; rt = rcu_dereference(rt->fib6_next))
-               match = find_match(rt, oif, strict, &mpri, match, do_rr);
+       if (res->f6i || !cont)
+               return;
 
-       return match;
+       __find_rr_leaf(cont, NULL, metric, res, NULL,
+                      oif, strict, do_rr, &mpri);
 }
 
-static struct fib6_info *rt6_select(struct net *net, struct fib6_node *fn,
-                                  int oif, int strict)
+static void rt6_select(struct net *net, struct fib6_node *fn, int oif,
+                      struct fib6_result *res, int strict)
 {
        struct fib6_info *leaf = rcu_dereference(fn->leaf);
-       struct fib6_info *match, *rt0;
+       struct fib6_info *rt0;
        bool do_rr = false;
        int key_plen;
 
+       /* make sure this function or its helpers set f6i */
+       res->f6i = NULL;
+
        if (!leaf || leaf == net->ipv6.fib6_null_entry)
-               return net->ipv6.fib6_null_entry;
+               goto out;
 
        rt0 = rcu_dereference(fn->rr_ptr);
        if (!rt0)
@@ -743,11 +777,9 @@ static struct fib6_info *rt6_select(struct net *net, struct fib6_node *fn,
                key_plen = rt0->fib6_src.plen;
 #endif
        if (fn->fn_bit != key_plen)
-               return net->ipv6.fib6_null_entry;
-
-       match = find_rr_leaf(fn, leaf, rt0, rt0->fib6_metric, oif, strict,
-                            &do_rr);
+               goto out;
 
+       find_rr_leaf(fn, leaf, rt0, oif, strict, &do_rr, res);
        if (do_rr) {
                struct fib6_info *next = rcu_dereference(rt0->fib6_next);
 
@@ -764,12 +796,19 @@ static struct fib6_info *rt6_select(struct net *net, struct fib6_node *fn,
                }
        }
 
-       return match ? match : net->ipv6.fib6_null_entry;
+out:
+       if (!res->f6i) {
+               res->f6i = net->ipv6.fib6_null_entry;
+               res->nh = &res->f6i->fib6_nh;
+               res->fib6_flags = res->f6i->fib6_flags;
+               res->fib6_type = res->f6i->fib6_type;
+       }
 }
 
-static bool rt6_is_gw_or_nonexthop(const struct fib6_info *rt)
+static bool rt6_is_gw_or_nonexthop(const struct fib6_result *res)
 {
-       return (rt->fib6_flags & RTF_NONEXTHOP) || rt->fib6_nh.fib_nh_has_gw;
+       return (res->f6i->fib6_flags & RTF_NONEXTHOP) ||
+              res->nh->fib_nh_gw_family;
 }
 
 #ifdef CONFIG_IPV6_ROUTE_INFO
@@ -853,17 +892,17 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
  */
 
 /* called with rcu_lock held */
-static struct net_device *ip6_rt_get_dev_rcu(struct fib6_info *rt)
+static struct net_device *ip6_rt_get_dev_rcu(const struct fib6_result *res)
 {
-       struct net_device *dev = rt->fib6_nh.fib_nh_dev;
+       struct net_device *dev = res->nh->fib_nh_dev;
 
-       if (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) {
+       if (res->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) {
                /* for copies of local routes, dst->dev needs to be the
                 * device if it is a master device, the master device if
                 * device is enslaved, and the loopback as the default
                 */
                if (netif_is_l3_slave(dev) &&
-                   !rt6_need_strict(&rt->fib6_dst.addr))
+                   !rt6_need_strict(&res->f6i->fib6_dst.addr))
                        dev = l3mdev_master_dev_rcu(dev);
                else if (!netif_is_l3_master(dev))
                        dev = dev_net(dev)->loopback_dev;
@@ -909,11 +948,11 @@ static unsigned short fib6_info_dst_flags(struct fib6_info *rt)
        return flags;
 }
 
-static void ip6_rt_init_dst_reject(struct rt6_info *rt, struct fib6_info *ort)
+static void ip6_rt_init_dst_reject(struct rt6_info *rt, u8 fib6_type)
 {
-       rt->dst.error = ip6_rt_type_to_error(ort->fib6_type);
+       rt->dst.error = ip6_rt_type_to_error(fib6_type);
 
-       switch (ort->fib6_type) {
+       switch (fib6_type) {
        case RTN_BLACKHOLE:
                rt->dst.output = dst_discard_out;
                rt->dst.input = dst_discard;
@@ -931,26 +970,28 @@ static void ip6_rt_init_dst_reject(struct rt6_info *rt, struct fib6_info *ort)
        }
 }
 
-static void ip6_rt_init_dst(struct rt6_info *rt, struct fib6_info *ort)
+static void ip6_rt_init_dst(struct rt6_info *rt, const struct fib6_result *res)
 {
-       if (ort->fib6_flags & RTF_REJECT) {
-               ip6_rt_init_dst_reject(rt, ort);
+       struct fib6_info *f6i = res->f6i;
+
+       if (res->fib6_flags & RTF_REJECT) {
+               ip6_rt_init_dst_reject(rt, res->fib6_type);
                return;
        }
 
        rt->dst.error = 0;
        rt->dst.output = ip6_output;
 
-       if (ort->fib6_type == RTN_LOCAL || ort->fib6_type == RTN_ANYCAST) {
+       if (res->fib6_type == RTN_LOCAL || res->fib6_type == RTN_ANYCAST) {
                rt->dst.input = ip6_input;
-       } else if (ipv6_addr_type(&ort->fib6_dst.addr) & IPV6_ADDR_MULTICAST) {
+       } else if (ipv6_addr_type(&f6i->fib6_dst.addr) & IPV6_ADDR_MULTICAST) {
                rt->dst.input = ip6_mc_input;
        } else {
                rt->dst.input = ip6_forward;
        }
 
-       if (ort->fib6_nh.fib_nh_lws) {
-               rt->dst.lwtstate = lwtstate_get(ort->fib6_nh.fib_nh_lws);
+       if (res->nh->fib_nh_lws) {
+               rt->dst.lwtstate = lwtstate_get(res->nh->fib_nh_lws);
                lwtunnel_set_redirect(&rt->dst);
        }
 
@@ -965,23 +1006,25 @@ static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
        ip_dst_init_metrics(&rt->dst, from->fib6_metrics);
 }
 
-/* Caller must already hold reference to @ort */
-static void ip6_rt_copy_init(struct rt6_info *rt, struct fib6_info *ort)
+/* Caller must already hold reference to f6i in result */
+static void ip6_rt_copy_init(struct rt6_info *rt, const struct fib6_result *res)
 {
-       struct net_device *dev = fib6_info_nh_dev(ort);
+       const struct fib6_nh *nh = res->nh;
+       const struct net_device *dev = nh->fib_nh_dev;
+       struct fib6_info *f6i = res->f6i;
 
-       ip6_rt_init_dst(rt, ort);
+       ip6_rt_init_dst(rt, res);
 
-       rt->rt6i_dst = ort->fib6_dst;
+       rt->rt6i_dst = f6i->fib6_dst;
        rt->rt6i_idev = dev ? in6_dev_get(dev) : NULL;
-       rt->rt6i_flags = ort->fib6_flags;
-       if (ort->fib6_nh.fib_nh_has_gw) {
-               rt->rt6i_gateway = ort->fib6_nh.fib_nh_gw6;
+       rt->rt6i_flags = res->fib6_flags;
+       if (nh->fib_nh_gw_family) {
+               rt->rt6i_gateway = nh->fib_nh_gw6;
                rt->rt6i_flags |= RTF_GATEWAY;
        }
-       rt6_set_from(rt, ort);
+       rt6_set_from(rt, f6i);
 #ifdef CONFIG_IPV6_SUBTREES
-       rt->rt6i_src = ort->fib6_src;
+       rt->rt6i_src = f6i->fib6_src;
 #endif
 }
 
@@ -1020,22 +1063,24 @@ static bool ip6_hold_safe(struct net *net, struct rt6_info **prt)
 }
 
 /* called with rcu_lock held */
-static struct rt6_info *ip6_create_rt_rcu(struct fib6_info *rt)
+static struct rt6_info *ip6_create_rt_rcu(const struct fib6_result *res)
 {
-       unsigned short flags = fib6_info_dst_flags(rt);
-       struct net_device *dev = rt->fib6_nh.fib_nh_dev;
+       struct net_device *dev = res->nh->fib_nh_dev;
+       struct fib6_info *f6i = res->f6i;
+       unsigned short flags;
        struct rt6_info *nrt;
 
-       if (!fib6_info_hold_safe(rt))
+       if (!fib6_info_hold_safe(f6i))
                goto fallback;
 
+       flags = fib6_info_dst_flags(f6i);
        nrt = ip6_dst_alloc(dev_net(dev), dev, flags);
        if (!nrt) {
-               fib6_info_release(rt);
+               fib6_info_release(f6i);
                goto fallback;
        }
 
-       ip6_rt_copy_init(nrt, rt);
+       ip6_rt_copy_init(nrt, res);
        return nrt;
 
 fallback:
@@ -1050,7 +1095,7 @@ static struct rt6_info *ip6_pol_route_lookup(struct net *net,
                                             const struct sk_buff *skb,
                                             int flags)
 {
-       struct fib6_info *f6i;
+       struct fib6_result res = {};
        struct fib6_node *fn;
        struct rt6_info *rt;
 
@@ -1060,37 +1105,38 @@ static struct rt6_info *ip6_pol_route_lookup(struct net *net,
        rcu_read_lock();
        fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
 restart:
-       f6i = rcu_dereference(fn->leaf);
-       if (!f6i) {
-               f6i = net->ipv6.fib6_null_entry;
-       } else {
-               f6i = rt6_device_match(net, f6i, &fl6->saddr,
-                                     fl6->flowi6_oif, flags);
-               if (f6i->fib6_nsiblings && fl6->flowi6_oif == 0)
-                       f6i = fib6_multipath_select(net, f6i, fl6,
-                                                   fl6->flowi6_oif, skb,
-                                                   flags);
-       }
-       if (f6i == net->ipv6.fib6_null_entry) {
+       res.f6i = rcu_dereference(fn->leaf);
+       if (!res.f6i)
+               res.f6i = net->ipv6.fib6_null_entry;
+       else
+               rt6_device_match(net, &res, &fl6->saddr, fl6->flowi6_oif,
+                                flags);
+
+       if (res.f6i == net->ipv6.fib6_null_entry) {
                fn = fib6_backtrack(fn, &fl6->saddr);
                if (fn)
                        goto restart;
+
+               rt = net->ipv6.ip6_null_entry;
+               dst_hold(&rt->dst);
+               goto out;
        }
 
-       trace_fib6_table_lookup(net, f6i, table, fl6);
+       fib6_select_path(net, &res, fl6, fl6->flowi6_oif,
+                        fl6->flowi6_oif != 0, skb, flags);
 
        /* Search through exception table */
-       rt = rt6_find_cached_rt(f6i, &fl6->daddr, &fl6->saddr);
+       rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr);
        if (rt) {
                if (ip6_hold_safe(net, &rt))
                        dst_use_noref(&rt->dst, jiffies);
-       } else if (f6i == net->ipv6.fib6_null_entry) {
-               rt = net->ipv6.ip6_null_entry;
-               dst_hold(&rt->dst);
        } else {
-               rt = ip6_create_rt_rcu(f6i);
+               rt = ip6_create_rt_rcu(&res);
        }
 
+out:
+       trace_fib6_table_lookup(net, &res, table, fl6);
+
        rcu_read_unlock();
 
        return rt;
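
Taken together, the rewritten lookup above now runs in three stages, all under RCU; a condensed sketch of the flow, with names as in the hunk and error handling elided:

	struct fib6_result res = {};

	rcu_read_lock();
	/* 1) device match fills res.f6i/res.nh from the fib node */
	rt6_device_match(net, &res, &fl6->saddr, fl6->flowi6_oif, flags);
	/* 2) multipath selection may swap in a sibling route */
	fib6_select_path(net, &res, fl6, fl6->flowi6_oif,
			 fl6->flowi6_oif != 0, skb, flags);
	/* 3) a cached exception (PMTU/redirect clone) overrides the result */
	rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr);
	rcu_read_unlock();

Note also that trace_fib6_table_lookup() now fires after path selection, so the traced nexthop reflects the multipath choice.
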
@@ -1156,10 +1202,11 @@ int ip6_ins_rt(struct net *net, struct fib6_info *rt)
        return __ip6_ins_rt(rt, &info, NULL);
 }
 
-static struct rt6_info *ip6_rt_cache_alloc(struct fib6_info *ort,
+static struct rt6_info *ip6_rt_cache_alloc(const struct fib6_result *res,
                                           const struct in6_addr *daddr,
                                           const struct in6_addr *saddr)
 {
+       struct fib6_info *f6i = res->f6i;
        struct net_device *dev;
        struct rt6_info *rt;
 
@@ -1167,25 +1214,25 @@ static struct rt6_info *ip6_rt_cache_alloc(struct fib6_info *ort,
         *      Clone the route.
         */
 
-       if (!fib6_info_hold_safe(ort))
+       if (!fib6_info_hold_safe(f6i))
                return NULL;
 
-       dev = ip6_rt_get_dev_rcu(ort);
+       dev = ip6_rt_get_dev_rcu(res);
        rt = ip6_dst_alloc(dev_net(dev), dev, 0);
        if (!rt) {
-               fib6_info_release(ort);
+               fib6_info_release(f6i);
                return NULL;
        }
 
-       ip6_rt_copy_init(rt, ort);
+       ip6_rt_copy_init(rt, res);
        rt->rt6i_flags |= RTF_CACHE;
        rt->dst.flags |= DST_HOST;
        rt->rt6i_dst.addr = *daddr;
        rt->rt6i_dst.plen = 128;
 
-       if (!rt6_is_gw_or_nonexthop(ort)) {
-               if (ort->fib6_dst.plen != 128 &&
-                   ipv6_addr_equal(&ort->fib6_dst.addr, daddr))
+       if (!rt6_is_gw_or_nonexthop(res)) {
+               if (f6i->fib6_dst.plen != 128 &&
+                   ipv6_addr_equal(&f6i->fib6_dst.addr, daddr))
                        rt->rt6i_flags |= RTF_ANYCAST;
 #ifdef CONFIG_IPV6_SUBTREES
                if (rt->rt6i_src.plen && saddr) {
@@ -1198,34 +1245,35 @@ static struct rt6_info *ip6_rt_cache_alloc(struct fib6_info *ort,
        return rt;
 }
 
-static struct rt6_info *ip6_rt_pcpu_alloc(struct fib6_info *rt)
+static struct rt6_info *ip6_rt_pcpu_alloc(const struct fib6_result *res)
 {
-       unsigned short flags = fib6_info_dst_flags(rt);
+       struct fib6_info *f6i = res->f6i;
+       unsigned short flags = fib6_info_dst_flags(f6i);
        struct net_device *dev;
        struct rt6_info *pcpu_rt;
 
-       if (!fib6_info_hold_safe(rt))
+       if (!fib6_info_hold_safe(f6i))
                return NULL;
 
        rcu_read_lock();
-       dev = ip6_rt_get_dev_rcu(rt);
+       dev = ip6_rt_get_dev_rcu(res);
        pcpu_rt = ip6_dst_alloc(dev_net(dev), dev, flags);
        rcu_read_unlock();
        if (!pcpu_rt) {
-               fib6_info_release(rt);
+               fib6_info_release(f6i);
                return NULL;
        }
-       ip6_rt_copy_init(pcpu_rt, rt);
+       ip6_rt_copy_init(pcpu_rt, res);
        pcpu_rt->rt6i_flags |= RTF_PCPU;
        return pcpu_rt;
 }
 
 /* It should be called with rcu_read_lock() acquired */
-static struct rt6_info *rt6_get_pcpu_route(struct fib6_info *rt)
+static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res)
 {
        struct rt6_info *pcpu_rt, **p;
 
-       p = this_cpu_ptr(rt->rt6i_pcpu);
+       p = this_cpu_ptr(res->f6i->rt6i_pcpu);
        pcpu_rt = *p;
 
        if (pcpu_rt)
@@ -1235,18 +1283,18 @@ static struct rt6_info *rt6_get_pcpu_route(struct fib6_info *rt)
 }
 
 static struct rt6_info *rt6_make_pcpu_route(struct net *net,
-                                           struct fib6_info *rt)
+                                           const struct fib6_result *res)
 {
        struct rt6_info *pcpu_rt, *prev, **p;
 
-       pcpu_rt = ip6_rt_pcpu_alloc(rt);
+       pcpu_rt = ip6_rt_pcpu_alloc(res);
        if (!pcpu_rt) {
                dst_hold(&net->ipv6.ip6_null_entry->dst);
                return net->ipv6.ip6_null_entry;
        }
 
        dst_hold(&pcpu_rt->dst);
-       p = this_cpu_ptr(rt->rt6i_pcpu);
+       p = this_cpu_ptr(res->f6i->rt6i_pcpu);
        prev = cmpxchg(p, NULL, pcpu_rt);
        BUG_ON(prev);
 
@@ -1389,14 +1437,15 @@ __rt6_find_exception_rcu(struct rt6_exception_bucket **bucket,
        return NULL;
 }
 
-static unsigned int fib6_mtu(const struct fib6_info *rt)
+static unsigned int fib6_mtu(const struct fib6_result *res)
 {
+       const struct fib6_nh *nh = res->nh;
        unsigned int mtu;
 
-       if (rt->fib6_pmtu) {
-               mtu = rt->fib6_pmtu;
+       if (res->f6i->fib6_pmtu) {
+               mtu = res->f6i->fib6_pmtu;
        } else {
-               struct net_device *dev = fib6_info_nh_dev(rt);
+               struct net_device *dev = nh->fib_nh_dev;
                struct inet6_dev *idev;
 
                rcu_read_lock();
@@ -1407,26 +1456,27 @@ static unsigned int fib6_mtu(const struct fib6_info *rt)
 
        mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
 
-       return mtu - lwtunnel_headroom(rt->fib6_nh.fib_nh_lws, mtu);
+       return mtu - lwtunnel_headroom(nh->fib_nh_lws, mtu);
 }
 
 static int rt6_insert_exception(struct rt6_info *nrt,
-                               struct fib6_info *ort)
+                               const struct fib6_result *res)
 {
        struct net *net = dev_net(nrt->dst.dev);
        struct rt6_exception_bucket *bucket;
        struct in6_addr *src_key = NULL;
        struct rt6_exception *rt6_ex;
+       struct fib6_info *f6i = res->f6i;
        int err = 0;
 
        spin_lock_bh(&rt6_exception_lock);
 
-       if (ort->exception_bucket_flushed) {
+       if (f6i->exception_bucket_flushed) {
                err = -EINVAL;
                goto out;
        }
 
-       bucket = rcu_dereference_protected(ort->rt6i_exception_bucket,
+       bucket = rcu_dereference_protected(f6i->rt6i_exception_bucket,
                                        lockdep_is_held(&rt6_exception_lock));
        if (!bucket) {
                bucket = kcalloc(FIB6_EXCEPTION_BUCKET_SIZE, sizeof(*bucket),
@@ -1435,24 +1485,24 @@ static int rt6_insert_exception(struct rt6_info *nrt,
                        err = -ENOMEM;
                        goto out;
                }
-               rcu_assign_pointer(ort->rt6i_exception_bucket, bucket);
+               rcu_assign_pointer(f6i->rt6i_exception_bucket, bucket);
        }
 
 #ifdef CONFIG_IPV6_SUBTREES
-       /* rt6i_src.plen != 0 indicates ort is in subtree
+       /* fib6_src.plen != 0 indicates f6i is in subtree
         * and exception table is indexed by a hash of
-        * both rt6i_dst and rt6i_src.
+        * both fib6_dst and fib6_src.
         * Otherwise, the exception table is indexed by
-        * a hash of only rt6i_dst.
+        * a hash of only fib6_dst.
         */
-       if (ort->fib6_src.plen)
+       if (f6i->fib6_src.plen)
                src_key = &nrt->rt6i_src.addr;
 #endif
-       /* rt6_mtu_change() might lower mtu on ort.
+       /* rt6_mtu_change() might lower mtu on f6i.
         * Only insert this exception route if its mtu
-        * is less than ort's mtu value.
+        * is less than f6i's mtu value.
         */
-       if (dst_metric_raw(&nrt->dst, RTAX_MTU) >= fib6_mtu(ort)) {
+       if (dst_metric_raw(&nrt->dst, RTAX_MTU) >= fib6_mtu(res)) {
                err = -EINVAL;
                goto out;
        }
@@ -1481,9 +1531,9 @@ static int rt6_insert_exception(struct rt6_info *nrt,
 
        /* Update fn->fn_sernum to invalidate all cached dst */
        if (!err) {
-               spin_lock_bh(&ort->fib6_table->tb6_lock);
-               fib6_update_sernum(net, ort);
-               spin_unlock_bh(&ort->fib6_table->tb6_lock);
+               spin_lock_bh(&f6i->fib6_table->tb6_lock);
+               fib6_update_sernum(net, f6i);
+               spin_unlock_bh(&f6i->fib6_table->tb6_lock);
                fib6_force_start_gc(net);
        }
 
@@ -1520,33 +1570,33 @@ void rt6_flush_exceptions(struct fib6_info *rt)
 /* Find cached rt in the hash table inside the passed-in rt
  * Caller has to hold rcu_read_lock()
  */
-static struct rt6_info *rt6_find_cached_rt(struct fib6_info *rt,
+static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res,
                                           struct in6_addr *daddr,
                                           struct in6_addr *saddr)
 {
        struct rt6_exception_bucket *bucket;
        struct in6_addr *src_key = NULL;
        struct rt6_exception *rt6_ex;
-       struct rt6_info *res = NULL;
+       struct rt6_info *ret = NULL;
 
-       bucket = rcu_dereference(rt->rt6i_exception_bucket);
+       bucket = rcu_dereference(res->f6i->rt6i_exception_bucket);
 
 #ifdef CONFIG_IPV6_SUBTREES
-       /* rt6i_src.plen != 0 indicates rt is in subtree
+       /* fib6_src.plen != 0 indicates f6i is in subtree
         * and exception table is indexed by a hash of
-        * both rt6i_dst and rt6i_src.
+        * both fib6_dst and fib6_src.
         * Otherwise, the exception table is indexed by
-        * a hash of only rt6i_dst.
+        * a hash of only fib6_dst.
         */
-       if (rt->fib6_src.plen)
+       if (res->f6i->fib6_src.plen)
                src_key = saddr;
 #endif
        rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);
 
        if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i))
-               res = rt6_ex->rt6i;
+               ret = rt6_ex->rt6i;
 
-       return res;
+       return ret;
 }
 
 /* Remove the passed in cached rt from the hash table that contains it */
@@ -1794,11 +1844,10 @@ void rt6_age_exceptions(struct fib6_info *rt,
 }
 
 /* must be called with rcu lock held */
-struct fib6_info *fib6_table_lookup(struct net *net, struct fib6_table *table,
-                                   int oif, struct flowi6 *fl6, int strict)
+int fib6_table_lookup(struct net *net, struct fib6_table *table, int oif,
+                     struct flowi6 *fl6, struct fib6_result *res, int strict)
 {
        struct fib6_node *fn, *saved_fn;
-       struct fib6_info *f6i;
 
        fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
        saved_fn = fn;
@@ -1807,8 +1856,8 @@ struct fib6_info *fib6_table_lookup(struct net *net, struct fib6_table *table,
                oif = 0;
 
 redo_rt6_select:
-       f6i = rt6_select(net, fn, oif, strict);
-       if (f6i == net->ipv6.fib6_null_entry) {
+       rt6_select(net, fn, oif, res, strict);
+       if (res->f6i == net->ipv6.fib6_null_entry) {
                fn = fib6_backtrack(fn, &fl6->saddr);
                if (fn)
                        goto redo_rt6_select;
@@ -1820,16 +1869,16 @@ struct fib6_info *fib6_table_lookup(struct net *net, struct fib6_table *table,
                }
        }
 
-       trace_fib6_table_lookup(net, f6i, table, fl6);
+       trace_fib6_table_lookup(net, res, table, fl6);
 
-       return f6i;
+       return 0;
 }
 
 struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
                               int oif, struct flowi6 *fl6,
                               const struct sk_buff *skb, int flags)
 {
-       struct fib6_info *f6i;
+       struct fib6_result res = {};
        struct rt6_info *rt;
        int strict = 0;
 
@@ -1840,19 +1889,18 @@ struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
 
        rcu_read_lock();
 
-       f6i = fib6_table_lookup(net, table, oif, fl6, strict);
-       if (f6i->fib6_nsiblings)
-               f6i = fib6_multipath_select(net, f6i, fl6, oif, skb, strict);
-
-       if (f6i == net->ipv6.fib6_null_entry) {
+       fib6_table_lookup(net, table, oif, fl6, &res, strict);
+       if (res.f6i == net->ipv6.fib6_null_entry) {
                rt = net->ipv6.ip6_null_entry;
                rcu_read_unlock();
                dst_hold(&rt->dst);
                return rt;
        }
 
+       fib6_select_path(net, &res, fl6, oif, false, skb, strict);
+
        /* Search through exception table */
-       rt = rt6_find_cached_rt(f6i, &fl6->daddr, &fl6->saddr);
+       rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr);
        if (rt) {
                if (ip6_hold_safe(net, &rt))
                        dst_use_noref(&rt->dst, jiffies);
@@ -1860,7 +1908,7 @@ struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
                rcu_read_unlock();
                return rt;
        } else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) &&
-                           !f6i->fib6_nh.fib_nh_has_gw)) {
+                           !res.nh->fib_nh_gw_family)) {
                /* Create a RTF_CACHE clone which will not be
                 * owned by the fib6 tree.  It is for the special case where
                 * the daddr in the skb during the neighbor look-up is different
@@ -1868,7 +1916,7 @@ struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
                 */
                struct rt6_info *uncached_rt;
 
-               uncached_rt = ip6_rt_cache_alloc(f6i, &fl6->daddr, NULL);
+               uncached_rt = ip6_rt_cache_alloc(&res, &fl6->daddr, NULL);
 
                rcu_read_unlock();
 
@@ -1890,10 +1938,10 @@ struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
                struct rt6_info *pcpu_rt;
 
                local_bh_disable();
-               pcpu_rt = rt6_get_pcpu_route(f6i);
+               pcpu_rt = rt6_get_pcpu_route(&res);
 
                if (!pcpu_rt)
-                       pcpu_rt = rt6_make_pcpu_route(net, f6i);
+                       pcpu_rt = rt6_make_pcpu_route(net, &res);
 
                local_bh_enable();
                rcu_read_unlock();
@@ -2312,15 +2360,23 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
                if (rt6->rt6i_flags & RTF_CACHE)
                        rt6_update_exception_stamp_rt(rt6);
        } else if (daddr) {
-               struct fib6_info *from;
+               struct fib6_result res = {};
                struct rt6_info *nrt6;
 
                rcu_read_lock();
-               from = rcu_dereference(rt6->from);
-               nrt6 = ip6_rt_cache_alloc(from, daddr, saddr);
+               res.f6i = rcu_dereference(rt6->from);
+               if (!res.f6i) {
+                       rcu_read_unlock();
+                       return;
+               }
+               res.nh = &res.f6i->fib6_nh;
+               res.fib6_flags = res.f6i->fib6_flags;
+               res.fib6_type = res.f6i->fib6_type;
+
+               nrt6 = ip6_rt_cache_alloc(&res, daddr, saddr);
                if (nrt6) {
                        rt6_do_update_pmtu(nrt6, mtu);
-                       if (rt6_insert_exception(nrt6, from))
+                       if (rt6_insert_exception(nrt6, &res))
                                dst_release_immediate(&nrt6->dst);
                }
                rcu_read_unlock();
@@ -2393,6 +2449,36 @@ void ip6_sk_dst_store_flow(struct sock *sk, struct dst_entry *dst,
                      NULL);
 }
 
+static bool ip6_redirect_nh_match(const struct fib6_result *res,
+                                 struct flowi6 *fl6,
+                                 const struct in6_addr *gw,
+                                 struct rt6_info **ret)
+{
+       const struct fib6_nh *nh = res->nh;
+
+       if (nh->fib_nh_flags & RTNH_F_DEAD || !nh->fib_nh_gw_family ||
+           fl6->flowi6_oif != nh->fib_nh_dev->ifindex)
+               return false;
+
+       /* rt_cache's gateway might be different from its 'parent'
+        * in the case of an ip redirect.
+        * So we keep searching in the exception table if the gateway
+        * is different.
+        */
+       if (!ipv6_addr_equal(gw, &nh->fib_nh_gw6)) {
+               struct rt6_info *rt_cache;
+
+               rt_cache = rt6_find_cached_rt(res, &fl6->daddr, &fl6->saddr);
+               if (rt_cache &&
+                   ipv6_addr_equal(gw, &rt_cache->rt6i_gateway)) {
+                       *ret = rt_cache;
+                       return true;
+               }
+               return false;
+       }
+       return true;
+}
+
 /* Handle redirects */
 struct ip6rd_flowi {
        struct flowi6 fl6;
@@ -2406,7 +2492,8 @@ static struct rt6_info *__ip6_route_redirect(struct net *net,
                                             int flags)
 {
        struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
-       struct rt6_info *ret = NULL, *rt_cache;
+       struct rt6_info *ret = NULL;
+       struct fib6_result res = {};
        struct fib6_info *rt;
        struct fib6_node *fn;
 
@@ -2424,34 +2511,15 @@ static struct rt6_info *__ip6_route_redirect(struct net *net,
        fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
 restart:
        for_each_fib6_node_rt_rcu(fn) {
-               if (rt->fib6_nh.fib_nh_flags & RTNH_F_DEAD)
-                       continue;
+               res.f6i = rt;
+               res.nh = &rt->fib6_nh;
+
                if (fib6_check_expired(rt))
                        continue;
                if (rt->fib6_flags & RTF_REJECT)
                        break;
-               if (!rt->fib6_nh.fib_nh_has_gw)
-                       continue;
-               if (fl6->flowi6_oif != rt->fib6_nh.fib_nh_dev->ifindex)
-                       continue;
-               /* rt_cache's gateway might be different from its 'parent'
-                * in the case of an ip redirect.
-                * So we keep searching in the exception table if the gateway
-                * is different.
-                */
-               if (!ipv6_addr_equal(&rdfl->gateway, &rt->fib6_nh.fib_nh_gw6)) {
-                       rt_cache = rt6_find_cached_rt(rt,
-                                                     &fl6->daddr,
-                                                     &fl6->saddr);
-                       if (rt_cache &&
-                           ipv6_addr_equal(&rdfl->gateway,
-                                           &rt_cache->rt6i_gateway)) {
-                               ret = rt_cache;
-                               break;
-                       }
-                       continue;
-               }
-               break;
+               if (ip6_redirect_nh_match(&res, fl6, &rdfl->gateway, &ret))
+                       goto out;
        }
 
        if (!rt)
@@ -2467,15 +2535,20 @@ static struct rt6_info *__ip6_route_redirect(struct net *net,
                        goto restart;
        }
 
+       res.f6i = rt;
+       res.nh = &rt->fib6_nh;
 out:
-       if (ret)
+       if (ret) {
                ip6_hold_safe(net, &ret);
-       else
-               ret = ip6_create_rt_rcu(rt);
+       } else {
+               res.fib6_flags = res.f6i->fib6_flags;
+               res.fib6_type = res.f6i->fib6_type;
+               ret = ip6_create_rt_rcu(&res);
+       }
 
        rcu_read_unlock();
 
-       trace_fib6_table_lookup(net, rt, table, fl6);
+       trace_fib6_table_lookup(net, &res, table, fl6);
        return ret;
 };
 
@@ -2593,12 +2666,15 @@ static unsigned int ip6_mtu(const struct dst_entry *dst)
  * based on ip6_dst_mtu_forward and exception logic of
  * rt6_find_cached_rt; called with rcu_read_lock
  */
-u32 ip6_mtu_from_fib6(struct fib6_info *f6i, struct in6_addr *daddr,
-                     struct in6_addr *saddr)
+u32 ip6_mtu_from_fib6(const struct fib6_result *res,
+                     const struct in6_addr *daddr,
+                     const struct in6_addr *saddr)
 {
        struct rt6_exception_bucket *bucket;
+       const struct fib6_nh *nh = res->nh;
+       struct fib6_info *f6i = res->f6i;
+       const struct in6_addr *src_key;
        struct rt6_exception *rt6_ex;
-       struct in6_addr *src_key;
        struct inet6_dev *idev;
        u32 mtu = 0;
 
@@ -2620,7 +2696,7 @@ u32 ip6_mtu_from_fib6(struct fib6_info *f6i, struct in6_addr *daddr,
                mtu = dst_metric_raw(&rt6_ex->rt6i->dst, RTAX_MTU);
 
        if (likely(!mtu)) {
-               struct net_device *dev = fib6_info_nh_dev(f6i);
+               struct net_device *dev = nh->fib_nh_dev;
 
                mtu = IPV6_MIN_MTU;
                idev = __in6_dev_get(dev);
@@ -2630,7 +2706,7 @@ u32 ip6_mtu_from_fib6(struct fib6_info *f6i, struct in6_addr *daddr,
 
        mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
 out:
-       return mtu - lwtunnel_headroom(fib6_info_nh_lwt(f6i), mtu);
+       return mtu - lwtunnel_headroom(nh->fib_nh_lws, mtu);
 }
 
 struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
@@ -2964,7 +3040,7 @@ int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
                        goto out;
 
                fib6_nh->fib_nh_gw6 = cfg->fc_gateway;
-               fib6_nh->fib_nh_has_gw = 1;
+               fib6_nh->fib_nh_gw_family = AF_INET6;
        }
 
        err = -ENODEV;
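
The patch also retires the fib_nh_has_gw bit in favour of fib_nh_gw_family, as set here. A hedged sketch of the resulting convention (use_v4_gw()/use_v6_gw() are hypothetical consumers; fib_nh_gw4/fib_nh_gw6 share a union in the common nexthop struct):

	switch (nh->fib_nh_gw_family) {
	case AF_INET:				/* IPv4 gateway */
		use_v4_gw(nh->fib_nh_gw4);	/* hypothetical */
		break;
	case AF_INET6:				/* IPv6 gateway, as set above */
		use_v6_gw(&nh->fib_nh_gw6);	/* hypothetical */
		break;
	default:				/* 0: connected route, no gateway */
		break;
	}

A zero family means "no gateway", so plain truth tests such as nh->fib_nh_gw_family keep working exactly as the old bit did, while also recording which gateway union member is valid.
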
@@ -3282,9 +3358,13 @@ static int ip6_route_del(struct fib6_config *cfg,
                        struct fib6_nh *nh;
 
                        if (cfg->fc_flags & RTF_CACHE) {
+                               struct fib6_result res = {
+                                       .f6i = rt,
+                               };
                                int rc;
 
-                               rt_cache = rt6_find_cached_rt(rt, &cfg->fc_dst,
+                               rt_cache = rt6_find_cached_rt(&res,
+                                                             &cfg->fc_dst,
                                                              &cfg->fc_src);
                                if (rt_cache) {
                                        rc = ip6_del_cached_rt(rt_cache, cfg);
@@ -3328,10 +3408,10 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
 {
        struct netevent_redirect netevent;
        struct rt6_info *rt, *nrt = NULL;
+       struct fib6_result res = {};
        struct ndisc_options ndopts;
        struct inet6_dev *in6_dev;
        struct neighbour *neigh;
-       struct fib6_info *from;
        struct rd_msg *msg;
        int optlen, on_link;
        u8 *lladdr;
@@ -3414,14 +3494,17 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
                     NDISC_REDIRECT, &ndopts);
 
        rcu_read_lock();
-       from = rcu_dereference(rt->from);
+       res.f6i = rcu_dereference(rt->from);
        /* This fib6_info_hold() is safe here because we hold reference to rt
         * and rt already holds reference to fib6_info.
         */
-       fib6_info_hold(from);
+       fib6_info_hold(res.f6i);
        rcu_read_unlock();
 
-       nrt = ip6_rt_cache_alloc(from, &msg->dest, NULL);
+       res.nh = &res.f6i->fib6_nh;
+       res.fib6_flags = res.f6i->fib6_flags;
+       res.fib6_type = res.f6i->fib6_type;
+       nrt = ip6_rt_cache_alloc(&res, &msg->dest, NULL);
        if (!nrt)
                goto out;
 
@@ -3435,7 +3518,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
         * a cached route because rt6_insert_exception() will
         * take care of it
         */
-       if (rt6_insert_exception(nrt, from)) {
+       if (rt6_insert_exception(nrt, &res)) {
                dst_release_immediate(&nrt->dst);
                goto out;
        }
@@ -3447,7 +3530,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
        call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
 
 out:
-       fib6_info_release(from);
+       fib6_info_release(res.f6i);
        neigh_release(neigh);
 }
 
@@ -3476,7 +3559,7 @@ static struct fib6_info *rt6_get_route_info(struct net *net,
                if (rt->fib6_nh.fib_nh_dev->ifindex != ifindex)
                        continue;
                if (!(rt->fib6_flags & RTF_ROUTEINFO) ||
-                   !rt->fib6_nh.fib_nh_has_gw)
+                   !rt->fib6_nh.fib_nh_gw_family)
                        continue;
                if (!ipv6_addr_equal(&rt->fib6_nh.fib_nh_gw6, gwaddr))
                        continue;
@@ -3807,7 +3890,7 @@ static int fib6_clean_tohost(struct fib6_info *rt, void *arg)
        struct in6_addr *gateway = (struct in6_addr *)arg;
 
        if (((rt->fib6_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) &&
-           rt->fib6_nh.fib_nh_has_gw &&
+           rt->fib6_nh.fib_nh_gw_family &&
            ipv6_addr_equal(gateway, &rt->fib6_nh.fib_nh_gw6)) {
                return -1;
        }
@@ -4582,73 +4665,6 @@ static size_t rt6_nlmsg_size(struct fib6_info *rt)
               + nexthop_len;
 }
 
-static int rt6_nexthop_info(struct sk_buff *skb, const struct fib6_nh *fib6_nh,
-                           unsigned int *flags, bool skip_oif)
-{
-       if (fib6_nh->fib_nh_flags & RTNH_F_DEAD)
-               *flags |= RTNH_F_DEAD;
-
-       if (fib6_nh->fib_nh_flags & RTNH_F_LINKDOWN) {
-               *flags |= RTNH_F_LINKDOWN;
-
-               rcu_read_lock();
-               if (ip6_ignore_linkdown(fib6_nh->fib_nh_dev))
-                       *flags |= RTNH_F_DEAD;
-               rcu_read_unlock();
-       }
-
-       if (fib6_nh->fib_nh_has_gw) {
-               if (nla_put_in6_addr(skb, RTA_GATEWAY, &fib6_nh->fib_nh_gw6) < 0)
-                       goto nla_put_failure;
-       }
-
-       *flags |= (fib6_nh->fib_nh_flags & RTNH_F_ONLINK);
-       if (fib6_nh->fib_nh_flags & RTNH_F_OFFLOAD)
-               *flags |= RTNH_F_OFFLOAD;
-
-       /* not needed for multipath encoding b/c it has a rtnexthop struct */
-       if (!skip_oif && fib6_nh->fib_nh_dev &&
-           nla_put_u32(skb, RTA_OIF, fib6_nh->fib_nh_dev->ifindex))
-               goto nla_put_failure;
-
-       if (fib6_nh->fib_nh_lws &&
-           lwtunnel_fill_encap(skb, fib6_nh->fib_nh_lws) < 0)
-               goto nla_put_failure;
-
-       return 0;
-
-nla_put_failure:
-       return -EMSGSIZE;
-}
-
-/* add multipath next hop */
-static int rt6_add_nexthop(struct sk_buff *skb, const struct fib6_nh *fib6_nh)
-{
-       const struct net_device *dev = fib6_nh->fib_nh_dev;
-       struct rtnexthop *rtnh;
-       unsigned int flags = 0;
-
-       rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
-       if (!rtnh)
-               goto nla_put_failure;
-
-       rtnh->rtnh_hops = fib6_nh->fib_nh_weight - 1;
-       rtnh->rtnh_ifindex = dev ? dev->ifindex : 0;
-
-       if (rt6_nexthop_info(skb, fib6_nh, &flags, true) < 0)
-               goto nla_put_failure;
-
-       rtnh->rtnh_flags = flags;
-
-       /* length of rtnetlink header + attributes */
-       rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh;
-
-       return 0;
-
-nla_put_failure:
-       return -EMSGSIZE;
-}
-
 static int rt6_fill_node(struct net *net, struct sk_buff *skb,
                         struct fib6_info *rt, struct dst_entry *dst,
                         struct in6_addr *dest, struct in6_addr *src,
@@ -4765,19 +4781,21 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
                if (!mp)
                        goto nla_put_failure;
 
-               if (rt6_add_nexthop(skb, &rt->fib6_nh) < 0)
+               if (fib_add_nexthop(skb, &rt->fib6_nh.nh_common,
+                                   rt->fib6_nh.fib_nh_weight) < 0)
                        goto nla_put_failure;
 
                list_for_each_entry_safe(sibling, next_sibling,
                                         &rt->fib6_siblings, fib6_siblings) {
-                       if (rt6_add_nexthop(skb, &sibling->fib6_nh) < 0)
+                       if (fib_add_nexthop(skb, &sibling->fib6_nh.nh_common,
+                                           sibling->fib6_nh.fib_nh_weight) < 0)
                                goto nla_put_failure;
                }
 
                nla_nest_end(skb, mp);
        } else {
-               if (rt6_nexthop_info(skb, &rt->fib6_nh, &rtm->rtm_flags,
-                                    false) < 0)
+               if (fib_nexthop_info(skb, &rt->fib6_nh.nh_common,
+                                    &rtm->rtm_flags, false) < 0)
                        goto nla_put_failure;
        }
 
index 07e21a82ce4cc2e41af8e38961f9917d357fd20b..b2109b74857d053f52b06c42698cc393d5838609 100644 (file)
@@ -669,6 +669,10 @@ static int ipip6_rcv(struct sk_buff *skb)
                    !net_eq(tunnel->net, dev_net(tunnel->dev))))
                        goto out;
 
+               /* skb can be uncloned in iptunnel_pull_header, so
+                * old iph is no longer valid
+                */
+               iph = (const struct iphdr *)skb_mac_header(skb);
                err = IP_ECN_decapsulate(iph, skb);
                if (unlikely(err)) {
                        if (log_ecn_error)
index eec814fe53b817106bc1d1eaa89dadcb96c974fa..82018bdce863165eba72e1ccf0c12ee558042ae8 100644 (file)
@@ -93,12 +93,13 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
 /* Helper returning the inet6 address from a given tcp socket.
  * It can be used in TCP stack instead of inet6_sk(sk).
  * This avoids a dereference and allows compiler optimizations.
+ * It is a specialized version of inet6_sk_generic().
  */
 static struct ipv6_pinfo *tcp_inet6_sk(const struct sock *sk)
 {
-       struct tcp6_sock *tcp6 = container_of(tcp_sk(sk), struct tcp6_sock, tcp);
+       unsigned int offset = sizeof(struct tcp6_sock) - sizeof(struct ipv6_pinfo);
 
-       return &tcp6->inet6;
+       return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
 }
 
 static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
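
For comparison, the generic helper the new comment refers to derives the same tail offset through sk_prot at run time (as defined in net/ipv6/af_inet6.c around this series):

	static struct ipv6_pinfo *inet6_sk_generic(struct sock *sk)
	{
		const int offset = sk->sk_prot->obj_size - sizeof(struct ipv6_pinfo);

		return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
	}

The TCP variant folds obj_size down to the compile-time constant sizeof(struct tcp6_sock). The replaced container_of() form was subtly wrong because socket creation places ipv6_pinfo at obj_size - sizeof(struct ipv6_pinfo), which differs from offsetof(struct tcp6_sock, inet6) when alignment adds tail padding, as on 32-bit builds.
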
index b444483cdb2b42ef7acdbd7d23a0c046f55077c2..2464fba569b4eb39452bd81f94cbe80227368fc9 100644 (file)
@@ -285,8 +285,7 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
        struct inet_sock *inet = inet_sk(sk);
        struct sk_buff *skb;
        unsigned int ulen, copied;
-       int peeked, peeking, off;
-       int err;
+       int off, err, peeking = flags & MSG_PEEK;
        int is_udplite = IS_UDPLITE(sk);
        struct udp_mib __percpu *mib;
        bool checksum_valid = false;
@@ -299,9 +298,8 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
 
 try_again:
-       peeking = flags & MSG_PEEK;
        off = sk_peek_offset(sk, flags);
-       skb = __skb_recv_udp(sk, flags, noblock, &peeked, &off, &err);
+       skb = __skb_recv_udp(sk, flags, noblock, &off, &err);
        if (!skb)
                return err;
 
@@ -340,14 +338,14 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                        goto csum_copy_err;
        }
        if (unlikely(err)) {
-               if (!peeked) {
+               if (!peeking) {
                        atomic_inc(&sk->sk_drops);
                        SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
                }
                kfree_skb(skb);
                return err;
        }
-       if (!peeked)
+       if (!peeking)
                SNMP_INC_STATS(mib, UDP_MIB_INDATAGRAMS);
 
        sock_recv_ts_and_drops(msg, sk, skb);
@@ -1047,6 +1045,8 @@ static void udp_v6_flush_pending_frames(struct sock *sk)
 static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
                             int addr_len)
 {
+       if (addr_len < offsetofend(struct sockaddr, sa_family))
+               return -EINVAL;
        /* The following checks are replicated from __ip6_datagram_connect()
         * and intended to prevent BPF program called below from accessing
         * bytes that are out of the bound specified by user in addr_len.
index c5c5ab6c5a1ccdf55eb7891e8c21ea3cdf7d2a28..44fdc641710dbdb64cedb3306ab822573cb97477 100644 (file)
@@ -2054,14 +2054,14 @@ static int __init kcm_init(void)
        if (err)
                goto fail;
 
-       err = sock_register(&kcm_family_ops);
-       if (err)
-               goto sock_register_fail;
-
        err = register_pernet_device(&kcm_net_ops);
        if (err)
                goto net_ops_fail;
 
+       err = sock_register(&kcm_family_ops);
+       if (err)
+               goto sock_register_fail;
+
        err = kcm_proc_init();
        if (err)
                goto proc_init_fail;
@@ -2069,12 +2069,12 @@ static int __init kcm_init(void)
        return 0;
 
 proc_init_fail:
-       unregister_pernet_device(&kcm_net_ops);
-
-net_ops_fail:
        sock_unregister(PF_KCM);
 
 sock_register_fail:
+       unregister_pernet_device(&kcm_net_ops);
+
+net_ops_fail:
        proto_unregister(&kcm_proto);
 
 fail:
@@ -2090,8 +2090,8 @@ static int __init kcm_init(void)
 static void __exit kcm_exit(void)
 {
        kcm_proc_exit();
-       unregister_pernet_device(&kcm_net_ops);
        sock_unregister(PF_KCM);
+       unregister_pernet_device(&kcm_net_ops);
        proto_unregister(&kcm_proto);
        destroy_workqueue(kcm_wq);
 
index b99e73a7e7e0f2b4959b279e3aecbadf29667d55..2017b7d780f5af73c1ac7461113842776d1b00fc 100644 (file)
@@ -320,14 +320,13 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
        struct llc_sap *sap;
        int rc = -EINVAL;
 
-       dprintk("%s: binding %02X\n", __func__, addr->sllc_sap);
-
        lock_sock(sk);
        if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr)))
                goto out;
        rc = -EAFNOSUPPORT;
        if (unlikely(addr->sllc_family != AF_LLC))
                goto out;
+       dprintk("%s: binding %02X\n", __func__, addr->sllc_sap);
        rc = -ENODEV;
        rcu_read_lock();
        if (sk->sk_bound_dev_if) {
index 28d022a3eee305bc9d04531eb6b70d3b57412d93..ae4f0be3b393ba727b95060bb7148ec0cd961440 100644 (file)
@@ -1195,6 +1195,9 @@ static inline void drv_wake_tx_queue(struct ieee80211_local *local,
 {
        struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->txq.vif);
 
+       if (local->in_reconfig)
+               return;
+
        if (!check_sdata_in_driver(sdata))
                return;
 
index 4700718e010f5a886001e9a0a0326a628edf0739..37e372896230a08c6a9214f88ce54e7ad823d352 100644 (file)
@@ -167,8 +167,10 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
                 * The driver doesn't know anything about VLAN interfaces.
                 * Hence, don't send GTKs for VLAN interfaces to the driver.
                 */
-               if (!(key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE))
+               if (!(key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+                       ret = 1;
                        goto out_unsupported;
+               }
        }
 
        ret = drv_set_key(key->local, SET_KEY, sdata,
@@ -213,11 +215,8 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
                /* all of these we can do in software - if driver can */
                if (ret == 1)
                        return 0;
-               if (ieee80211_hw_check(&key->local->hw, SW_CRYPTO_CONTROL)) {
-                       if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
-                               return 0;
+               if (ieee80211_hw_check(&key->local->hw, SW_CRYPTO_CONTROL))
                        return -EINVAL;
-               }
                return 0;
        default:
                return -EINVAL;
index 95eb5064fa9166220bf67af98dedf83726ffcdc8..b76a2aefa9ec05e5162ab565a108b5b98848116f 100644 (file)
@@ -23,7 +23,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl, struct mesh_path *mpath);
 static u32 mesh_table_hash(const void *addr, u32 len, u32 seed)
 {
        /* Use last four bytes of hw addr as hash index */
-       return jhash_1word(*(u32 *)(addr+2), seed);
+       return jhash_1word(__get_unaligned_cpu32((u8 *)addr + 2), seed);
 }
 
 static const struct rhashtable_params mesh_rht_params = {
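
The one-line mesh hashing fix matters on strict-alignment CPUs: addr points at a 6-byte hardware address, so addr + 2 is only guaranteed 2-byte alignment. An illustrative sketch of the difference:

	const u8 *addr = mpath->dst;	/* 6-byte MAC address */

	/* Before: a 4-byte load at a 2-byte-aligned address -- undefined
	 * behaviour that can fault or rotate bytes on some architectures.
	 */
	u32 bad  = *(u32 *)(addr + 2);

	/* After: the accessor emits byte loads where the CPU needs them. */
	u32 good = __get_unaligned_cpu32((u8 *)addr + 2);
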
index 7f8d93401ce070f9e2e61ce6a84e5ab8768b5811..bf0b187f994e9c56e191d2045f405cb6e6bac336 100644 (file)
@@ -1568,7 +1568,15 @@ static void sta_ps_start(struct sta_info *sta)
                return;
 
        for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) {
-               if (txq_has_queue(sta->sta.txq[tid]))
+               struct ieee80211_txq *txq = sta->sta.txq[tid];
+               struct txq_info *txqi = to_txq_info(txq);
+
+               spin_lock(&local->active_txq_lock[txq->ac]);
+               if (!list_empty(&txqi->schedule_order))
+                       list_del_init(&txqi->schedule_order);
+               spin_unlock(&local->active_txq_lock[txq->ac]);
+
+               if (txq_has_queue(txq))
                        set_bit(tid, &sta->txq_buffered_tids);
                else
                        clear_bit(tid, &sta->txq_buffered_tids);
index 366b9e6f043e2df89eccb4d63a9fb3ab1d7db023..40141df09f255fac46043f67656e98e16adda5b9 100644 (file)
@@ -1,4 +1,9 @@
 /* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Portions of this file
+ * Copyright (C) 2019 Intel Corporation
+ */
+
 #ifdef CONFIG_MAC80211_MESSAGE_TRACING
 
 #if !defined(__MAC80211_MSG_DRIVER_TRACE) || defined(TRACE_HEADER_MULTI_READ)
@@ -11,7 +16,7 @@
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM mac80211_msg
 
-#define MAX_MSG_LEN    100
+#define MAX_MSG_LEN    120
 
 DECLARE_EVENT_CLASS(mac80211_msg_event,
        TP_PROTO(struct va_format *vaf),
index 8a49a74c0a374815ca2f374510216b334eb00013..2e816dd67be72d161bf1959554d293f2f6725673 100644 (file)
@@ -3221,6 +3221,7 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
        u8 max_subframes = sta->sta.max_amsdu_subframes;
        int max_frags = local->hw.max_tx_fragments;
        int max_amsdu_len = sta->sta.max_amsdu_len;
+       int orig_truesize;
        __be16 len;
        void *data;
        bool ret = false;
@@ -3261,6 +3262,7 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
        if (!head || skb_is_gso(head))
                goto out;
 
+       orig_truesize = head->truesize;
        orig_len = head->len;
 
        if (skb->len + head->len > max_amsdu_len)
@@ -3318,6 +3320,7 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
        *frag_tail = skb;
 
 out_recalc:
+       fq->memory_usage += head->truesize - orig_truesize;
        if (head->len != orig_len) {
                flow->backlog += head->len - orig_len;
                tin->backlog_bytes += head->len - orig_len;
@@ -3646,16 +3649,17 @@ EXPORT_SYMBOL(ieee80211_tx_dequeue);
 struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac)
 {
        struct ieee80211_local *local = hw_to_local(hw);
+       struct ieee80211_txq *ret = NULL;
        struct txq_info *txqi = NULL;
 
-       lockdep_assert_held(&local->active_txq_lock[ac]);
+       spin_lock_bh(&local->active_txq_lock[ac]);
 
  begin:
        txqi = list_first_entry_or_null(&local->active_txqs[ac],
                                        struct txq_info,
                                        schedule_order);
        if (!txqi)
-               return NULL;
+               goto out;
 
        if (txqi->txq.sta) {
                struct sta_info *sta = container_of(txqi->txq.sta,
@@ -3672,24 +3676,30 @@ struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac)
 
 
        if (txqi->schedule_round == local->schedule_round[ac])
-               return NULL;
+               goto out;
 
        list_del_init(&txqi->schedule_order);
        txqi->schedule_round = local->schedule_round[ac];
-       return &txqi->txq;
+       ret = &txqi->txq;
+
+out:
+       spin_unlock_bh(&local->active_txq_lock[ac]);
+       return ret;
 }
 EXPORT_SYMBOL(ieee80211_next_txq);
 
-void ieee80211_return_txq(struct ieee80211_hw *hw,
-                         struct ieee80211_txq *txq)
+void __ieee80211_schedule_txq(struct ieee80211_hw *hw,
+                             struct ieee80211_txq *txq,
+                             bool force)
 {
        struct ieee80211_local *local = hw_to_local(hw);
        struct txq_info *txqi = to_txq_info(txq);
 
-       lockdep_assert_held(&local->active_txq_lock[txq->ac]);
+       spin_lock_bh(&local->active_txq_lock[txq->ac]);
 
        if (list_empty(&txqi->schedule_order) &&
-           (!skb_queue_empty(&txqi->frags) || txqi->tin.backlog_packets)) {
+           (force || !skb_queue_empty(&txqi->frags) ||
+            txqi->tin.backlog_packets)) {
                /* If airtime accounting is active, always enqueue STAs at the
                 * head of the list to ensure that they only get moved to the
                 * back by the airtime DRR scheduler once they have a negative
@@ -3706,20 +3716,10 @@ void ieee80211_return_txq(struct ieee80211_hw *hw,
                        list_add_tail(&txqi->schedule_order,
                                      &local->active_txqs[txq->ac]);
        }
-}
-EXPORT_SYMBOL(ieee80211_return_txq);
 
-void ieee80211_schedule_txq(struct ieee80211_hw *hw,
-                           struct ieee80211_txq *txq)
-       __acquires(txq_lock) __releases(txq_lock)
-{
-       struct ieee80211_local *local = hw_to_local(hw);
-
-       spin_lock_bh(&local->active_txq_lock[txq->ac]);
-       ieee80211_return_txq(hw, txq);
        spin_unlock_bh(&local->active_txq_lock[txq->ac]);
 }
-EXPORT_SYMBOL(ieee80211_schedule_txq);
+EXPORT_SYMBOL(__ieee80211_schedule_txq);
 
 bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw,
                                struct ieee80211_txq *txq)
@@ -3729,7 +3729,7 @@ bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw,
        struct sta_info *sta;
        u8 ac = txq->ac;
 
-       lockdep_assert_held(&local->active_txq_lock[ac]);
+       spin_lock_bh(&local->active_txq_lock[ac]);
 
        if (!txqi->txq.sta)
                goto out;
@@ -3759,34 +3759,27 @@ bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw,
 
        sta->airtime[ac].deficit += sta->airtime_weight;
        list_move_tail(&txqi->schedule_order, &local->active_txqs[ac]);
+       spin_unlock_bh(&local->active_txq_lock[ac]);
 
        return false;
 out:
        if (!list_empty(&txqi->schedule_order))
                list_del_init(&txqi->schedule_order);
+       spin_unlock_bh(&local->active_txq_lock[ac]);
 
        return true;
 }
 EXPORT_SYMBOL(ieee80211_txq_may_transmit);
 
 void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac)
-       __acquires(txq_lock)
 {
        struct ieee80211_local *local = hw_to_local(hw);
 
        spin_lock_bh(&local->active_txq_lock[ac]);
        local->schedule_round[ac]++;
-}
-EXPORT_SYMBOL(ieee80211_txq_schedule_start);
-
-void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac)
-       __releases(txq_lock)
-{
-       struct ieee80211_local *local = hw_to_local(hw);
-
        spin_unlock_bh(&local->active_txq_lock[ac]);
 }
-EXPORT_SYMBOL(ieee80211_txq_schedule_end);
+EXPORT_SYMBOL(ieee80211_txq_schedule_start);
 
 void __ieee80211_subif_start_xmit(struct sk_buff *skb,
                                  struct net_device *dev,
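
Taken together, these hunks move the per-AC scheduling lock inside the API: ieee80211_next_txq() and __ieee80211_schedule_txq() now lock internally, and ieee80211_txq_schedule_start() absorbs the old schedule_end(). Assuming ieee80211_return_txq() survives as a header wrapper around __ieee80211_schedule_txq() (the rename above suggests so), a driver's push loop reduces to roughly this sketch; drv_hw_queue_skb() is a hypothetical driver hook:

    static void drv_push_pending(struct ieee80211_hw *hw, u8 ac)
    {
        struct ieee80211_txq *txq;
        struct sk_buff *skb;

        ieee80211_txq_schedule_start(hw, ac);   /* bumps the round counter */

        while ((txq = ieee80211_next_txq(hw, ac)) != NULL) {
            while ((skb = ieee80211_tx_dequeue(hw, txq)) != NULL)
                drv_hw_queue_skb(hw, txq, skb); /* hypothetical */

            /* re-queues the txq on the schedule list if it still has traffic */
            ieee80211_return_txq(hw, txq);
        }
        /* no schedule_end() anymore: no lock is held across the loop */
    }
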
index f3a8557494d60e4d1ffe1f89fa32ea00c13eabab..2619c2fbea93be7d6fbc39912d6225c55e3680d0 100644 (file)
@@ -137,10 +137,14 @@ static int mpls_xmit(struct sk_buff *skb)
 
        mpls_stats_inc_outucastpkts(out_dev, skb);
 
-       if (rt)
-               err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt->rt_gateway,
-                                skb);
-       else if (rt6) {
+       if (rt) {
+               if (rt->rt_gw_family == AF_INET)
+                       err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt->rt_gw4,
+                                        skb);
+               else if (rt->rt_gw_family == AF_INET6)
+                       err = neigh_xmit(NEIGH_ND_TABLE, out_dev, &rt->rt_gw6,
+                                        skb);
+       } else if (rt6) {
                if (ipv6_addr_v4mapped(&rt6->rt6i_gateway)) {
                        /* 6PE (RFC 4798) */
                        err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt6->rt6i_gateway.s6_addr32[3],
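
With FIB entries now able to carry an IPv6 gateway (rt_gw_family selecting between rt_gw4 and rt_gw6), the MPLS output path resolves the neighbour through ARP or ND according to the gateway's family instead of assuming IPv4. A simplified userspace model of the dispatch:

    #include <stdio.h>
    #include <sys/socket.h>     /* AF_INET, AF_INET6 */

    enum neigh_tbl { NEIGH_ARP_TABLE, NEIGH_ND_TABLE };

    struct rt_model { int gw_family; }; /* stand-in for struct rtable */

    static int pick_neigh_table(const struct rt_model *rt, enum neigh_tbl *tbl)
    {
        switch (rt->gw_family) {
        case AF_INET:  *tbl = NEIGH_ARP_TABLE; return 0;
        case AF_INET6: *tbl = NEIGH_ND_TABLE;  return 0;
        default:       return -1;   /* on-link: no gateway recorded */
        }
    }

    int main(void)
    {
        struct rt_model rt = { .gw_family = AF_INET6 };
        enum neigh_tbl tbl;

        if (pick_neigh_table(&rt, &tbl) == 0)
            printf("resolve via %s\n",
                   tbl == NEIGH_ND_TABLE ? "ND" : "ARP");
        return 0;
    }
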
index 6548271209a05c2fce99628c9b23d2cedbf8a087..02b281d3c167e51959be632d120cee43749c63de 100644 (file)
@@ -404,11 +404,6 @@ config NF_NAT
          forms of full Network Address Port Translation. This can be
          controlled by iptables, ip6tables or nft.
 
-config NF_NAT_NEEDED
-       bool
-       depends on NF_NAT
-       default y
-
 config NF_NAT_AMANDA
        tristate
        depends on NF_CONNTRACK && NF_NAT
@@ -1002,6 +997,20 @@ config NETFILTER_XT_TARGET_REDIRECT
 
        To compile it as a module, choose M here. If unsure, say N.
 
+config NETFILTER_XT_TARGET_MASQUERADE
+       tristate "MASQUERADE target support"
+       depends on NF_NAT
+       default m if NETFILTER_ADVANCED=n
+       select NF_NAT_MASQUERADE
+       help
+         Masquerading is a special case of NAT: all outgoing connections are
+         changed to seem to come from a particular interface's address, and
+         if the interface goes down, those connections are lost.  This is
+         only useful for dialup accounts with dynamic IP address (i.e. your IP
+         address will be different on next dialup).
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
 config NETFILTER_XT_TARGET_TEE
        tristate '"TEE" - packet cloning to alternate destination'
        depends on NETFILTER_ADVANCED
index 4894a85cdd0b02c94cfd8b3b0c8ca60c3257d537..72cca6b489604a5bdbf7fb42add5c7fabfe82f16 100644 (file)
@@ -77,7 +77,8 @@ obj-$(CONFIG_NF_DUP_NETDEV)   += nf_dup_netdev.o
 nf_tables-objs := nf_tables_core.o nf_tables_api.o nft_chain_filter.o \
                  nf_tables_trace.o nft_immediate.o nft_cmp.o nft_range.o \
                  nft_bitwise.o nft_byteorder.o nft_payload.o nft_lookup.o \
-                 nft_dynset.o nft_meta.o nft_rt.o nft_exthdr.o
+                 nft_dynset.o nft_meta.o nft_rt.o nft_exthdr.o \
+                 nft_chain_route.o
 
 nf_tables_set-objs := nf_tables_set_core.o \
                      nft_set_hash.o nft_set_bitmap.o nft_set_rbtree.o
@@ -147,6 +148,7 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_NFQUEUE) += xt_NFQUEUE.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_RATEEST) += xt_RATEEST.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_REDIRECT) += xt_REDIRECT.o
+obj-$(CONFIG_NETFILTER_XT_TARGET_MASQUERADE) += xt_MASQUERADE.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_SECMARK) += xt_SECMARK.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_TPROXY) += xt_TPROXY.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_TCPMSS) += xt_TCPMSS.o
index 93aaec3a54ecdf701597457d2eddd6039275add0..71f06900473e9181b2d58b1daa9d42352c7382e6 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/mm.h>
 #include <linux/rcupdate.h>
 #include <net/net_namespace.h>
+#include <net/netfilter/nf_queue.h>
 #include <net/sock.h>
 
 #include "nf_internals.h"
index 4b933669fd831f0b6047644743f82d0b74fad349..ab119a7540db0fbed2e0c3cdbcc93af1160bfe29 100644 (file)
@@ -831,6 +831,10 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
        conn_flags = udest->conn_flags & IP_VS_CONN_F_DEST_MASK;
        conn_flags |= IP_VS_CONN_F_INACTIVE;
 
+       /* set the tunnel info */
+       dest->tun_type = udest->tun_type;
+       dest->tun_port = udest->tun_port;
+
        /* set the IP_VS_CONN_F_NOOUTPUT flag if not masquerading/NAT */
        if ((conn_flags & IP_VS_CONN_F_FWD_MASK) != IP_VS_CONN_F_MASQ) {
                conn_flags |= IP_VS_CONN_F_NOOUTPUT;
@@ -987,6 +991,13 @@ ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
                return -ERANGE;
        }
 
+       if (udest->tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE) {
+               if (udest->tun_port == 0) {
+                       pr_err("%s(): tunnel port is zero\n", __func__);
+                       return -EINVAL;
+               }
+       }
+
        ip_vs_addr_copy(udest->af, &daddr, &udest->addr);
 
        /* We use function that requires RCU lock */
@@ -1051,6 +1062,13 @@ ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
                return -ERANGE;
        }
 
+       if (udest->tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE) {
+               if (udest->tun_port == 0) {
+                       pr_err("%s(): tunnel port is zero\n", __func__);
+                       return -EINVAL;
+               }
+       }
+
        ip_vs_addr_copy(udest->af, &daddr, &udest->addr);
 
        /* We use function that requires RCU lock */
@@ -2333,6 +2351,7 @@ static void ip_vs_copy_udest_compat(struct ip_vs_dest_user_kern *udest,
        udest->u_threshold      = udest_compat->u_threshold;
        udest->l_threshold      = udest_compat->l_threshold;
        udest->af               = AF_INET;
+       udest->tun_type         = IP_VS_CONN_F_TUNNEL_TYPE_IPIP;
 }
 
 static int
@@ -2890,6 +2909,8 @@ static const struct nla_policy ip_vs_dest_policy[IPVS_DEST_ATTR_MAX + 1] = {
        [IPVS_DEST_ATTR_PERSIST_CONNS]  = { .type = NLA_U32 },
        [IPVS_DEST_ATTR_STATS]          = { .type = NLA_NESTED },
        [IPVS_DEST_ATTR_ADDR_FAMILY]    = { .type = NLA_U16 },
+       [IPVS_DEST_ATTR_TUN_TYPE]       = { .type = NLA_U8 },
+       [IPVS_DEST_ATTR_TUN_PORT]       = { .type = NLA_U16 },
 };
 
 static int ip_vs_genl_fill_stats(struct sk_buff *skb, int container_type,
@@ -3193,6 +3214,10 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
                         IP_VS_CONN_F_FWD_MASK)) ||
            nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
                        atomic_read(&dest->weight)) ||
+           nla_put_u8(skb, IPVS_DEST_ATTR_TUN_TYPE,
+                      dest->tun_type) ||
+           nla_put_be16(skb, IPVS_DEST_ATTR_TUN_PORT,
+                        dest->tun_port) ||
            nla_put_u32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold) ||
            nla_put_u32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold) ||
            nla_put_u32(skb, IPVS_DEST_ATTR_ACTIVE_CONNS,
@@ -3315,12 +3340,14 @@ static int ip_vs_genl_parse_dest(struct ip_vs_dest_user_kern *udest,
        /* If a full entry was requested, check for the additional fields */
        if (full_entry) {
                struct nlattr *nla_fwd, *nla_weight, *nla_u_thresh,
-                             *nla_l_thresh;
+                             *nla_l_thresh, *nla_tun_type, *nla_tun_port;
 
                nla_fwd         = attrs[IPVS_DEST_ATTR_FWD_METHOD];
                nla_weight      = attrs[IPVS_DEST_ATTR_WEIGHT];
                nla_u_thresh    = attrs[IPVS_DEST_ATTR_U_THRESH];
                nla_l_thresh    = attrs[IPVS_DEST_ATTR_L_THRESH];
+               nla_tun_type    = attrs[IPVS_DEST_ATTR_TUN_TYPE];
+               nla_tun_port    = attrs[IPVS_DEST_ATTR_TUN_PORT];
 
                if (!(nla_fwd && nla_weight && nla_u_thresh && nla_l_thresh))
                        return -EINVAL;
@@ -3330,6 +3357,12 @@ static int ip_vs_genl_parse_dest(struct ip_vs_dest_user_kern *udest,
                udest->weight = nla_get_u32(nla_weight);
                udest->u_threshold = nla_get_u32(nla_u_thresh);
                udest->l_threshold = nla_get_u32(nla_l_thresh);
+
+               if (nla_tun_type)
+                       udest->tun_type = nla_get_u8(nla_tun_type);
+
+               if (nla_tun_port)
+                       udest->tun_port = nla_get_be16(nla_tun_port);
        }
 
        return 0;
index 175349fcf91f5627fdd5143d06bcacda0050ed47..8d6f94b6777293dd08c19f9522c594d33a3710b6 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/slab.h>
 #include <linux/tcp.h>                  /* for tcphdr */
 #include <net/ip.h>
+#include <net/gue.h>
 #include <net/tcp.h>                    /* for csum_tcpudp_magic */
 #include <net/udp.h>
 #include <net/icmp.h>                   /* for icmp_send */
@@ -382,6 +383,10 @@ __ip_vs_get_out_rt(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb,
                mtu = dst_mtu(&rt->dst);
        } else {
                mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr);
+               if (!dest)
+                       goto err_put;
+               if (dest->tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE)
+                       mtu -= sizeof(struct udphdr) + sizeof(struct guehdr);
                if (mtu < 68) {
                        IP_VS_DBG_RL("%s(): mtu less than 68\n", __func__);
                        goto err_put;
@@ -533,6 +538,10 @@ __ip_vs_get_out_rt_v6(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb,
                mtu = dst_mtu(&rt->dst);
        else {
                mtu = dst_mtu(&rt->dst) - sizeof(struct ipv6hdr);
+               if (!dest)
+                       goto err_put;
+               if (dest->tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE)
+                       mtu -= sizeof(struct udphdr) + sizeof(struct guehdr);
                if (mtu < IPV6_MIN_MTU) {
                        IP_VS_DBG_RL("%s(): mtu less than %d\n", __func__,
                                     IPV6_MIN_MTU);
@@ -989,6 +998,41 @@ static inline int __tun_gso_type_mask(int encaps_af, int orig_af)
        }
 }
 
+static int
+ipvs_gue_encap(struct net *net, struct sk_buff *skb,
+              struct ip_vs_conn *cp, __u8 *next_protocol)
+{
+       __be16 dport;
+       __be16 sport = udp_flow_src_port(net, skb, 0, 0, false);
+       struct udphdr  *udph;   /* Our new UDP header */
+       struct guehdr  *gueh;   /* Our new GUE header */
+
+       skb_push(skb, sizeof(struct guehdr));
+
+       gueh = (struct guehdr *)skb->data;
+
+       gueh->control = 0;
+       gueh->version = 0;
+       gueh->hlen = 0;
+       gueh->flags = 0;
+       gueh->proto_ctype = *next_protocol;
+
+       skb_push(skb, sizeof(struct udphdr));
+       skb_reset_transport_header(skb);
+
+       udph = udp_hdr(skb);
+
+       dport = cp->dest->tun_port;
+       udph->dest = dport;
+       udph->source = sport;
+       udph->len = htons(skb->len);
+       udph->check = 0;
+
+       *next_protocol = IPPROTO_UDP;
+
+       return 0;
+}
+
 /*
  *   IP Tunneling transmitter
  *
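
GUE rides inside UDP, so this tunnel type costs sizeof(struct udphdr) + sizeof(struct guehdr) = 8 + 4 = 12 bytes, exactly the amount the MTU and headroom computations in this file subtract and add. A userspace layout sketch (little-endian bitfield order, matching the kernel's struct guehdr in that configuration):

    #include <stdint.h>
    #include <stdio.h>

    /* 4-byte GUE base header, no options */
    struct gue_hdr {
        uint8_t  hlen:5,
                 control:1,
                 version:2;
        uint8_t  proto_ctype;   /* inner protocol, e.g. IPPROTO_IPIP */
        uint16_t flags;
    };

    int main(void)
    {
        printf("GUE %zu + UDP 8 = %zu bytes of extra headroom\n",
               sizeof(struct gue_hdr), sizeof(struct gue_hdr) + 8);
        return 0;
    }
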
@@ -1025,6 +1069,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
        struct iphdr  *iph;                     /* Our new IP header */
        unsigned int max_headroom;              /* The extra header space needed */
        int ret, local;
+       int tun_type, gso_type;
 
        EnterFunction(10);
 
@@ -1046,6 +1091,11 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
         */
        max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct iphdr);
 
+       tun_type = cp->dest->tun_type;
+
+       if (tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE)
+               max_headroom += sizeof(struct udphdr) + sizeof(struct guehdr);
+
        /* We only care about the df field if sysctl_pmtu_disc(ipvs) is set */
        dfp = sysctl_pmtu_disc(ipvs) ? &df : NULL;
        skb = ip_vs_prepare_tunneled_skb(skb, cp->af, max_headroom,
@@ -1054,11 +1104,20 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
        if (IS_ERR(skb))
                goto tx_error;
 
-       if (iptunnel_handle_offloads(skb, __tun_gso_type_mask(AF_INET, cp->af)))
+       gso_type = __tun_gso_type_mask(AF_INET, cp->af);
+       if (tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE)
+               gso_type |= SKB_GSO_UDP_TUNNEL;
+
+       if (iptunnel_handle_offloads(skb, gso_type))
                goto tx_error;
 
        skb->transport_header = skb->network_header;
 
+       skb_set_inner_ipproto(skb, next_protocol);
+
+       if (tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE)
+               ipvs_gue_encap(net, skb, cp, &next_protocol);
+
        skb_push(skb, sizeof(struct iphdr));
        skb_reset_network_header(skb);
        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
@@ -1102,6 +1161,8 @@ int
 ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
                     struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
 {
+       struct netns_ipvs *ipvs = cp->ipvs;
+       struct net *net = ipvs->net;
        struct rt6_info *rt;            /* Route to the other host */
        struct in6_addr saddr;          /* Source for tunnel */
        struct net_device *tdev;        /* Device to other host */
@@ -1112,10 +1173,11 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
        struct ipv6hdr  *iph;           /* Our new IP header */
        unsigned int max_headroom;      /* The extra header space needed */
        int ret, local;
+       int tun_type, gso_type;
 
        EnterFunction(10);
 
-       local = __ip_vs_get_out_rt_v6(cp->ipvs, cp->af, skb, cp->dest,
+       local = __ip_vs_get_out_rt_v6(ipvs, cp->af, skb, cp->dest,
                                      &cp->daddr.in6,
                                      &saddr, ipvsh, 1,
                                      IP_VS_RT_MODE_LOCAL |
@@ -1134,17 +1196,31 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
         */
        max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct ipv6hdr);
 
+       tun_type = cp->dest->tun_type;
+
+       if (tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE)
+               max_headroom += sizeof(struct udphdr) + sizeof(struct guehdr);
+
        skb = ip_vs_prepare_tunneled_skb(skb, cp->af, max_headroom,
                                         &next_protocol, &payload_len,
                                         &dsfield, &ttl, NULL);
        if (IS_ERR(skb))
                goto tx_error;
 
-       if (iptunnel_handle_offloads(skb, __tun_gso_type_mask(AF_INET6, cp->af)))
+       gso_type = __tun_gso_type_mask(AF_INET6, cp->af);
+       if (tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE)
+               gso_type |= SKB_GSO_UDP_TUNNEL;
+
+       if (iptunnel_handle_offloads(skb, gso_type))
                goto tx_error;
 
        skb->transport_header = skb->network_header;
 
+       skb_set_inner_ipproto(skb, next_protocol);
+
+       if (tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE)
+               ipvs_gue_encap(net, skb, cp, &next_protocol);
+
        skb_push(skb, sizeof(struct ipv6hdr));
        skb_reset_network_header(skb);
        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
@@ -1167,7 +1243,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 
        ret = ip_vs_tunnel_xmit_prepare(skb, cp);
        if (ret == NF_ACCEPT)
-               ip6_local_out(cp->ipvs->net, skb->sk, skb);
+               ip6_local_out(net, skb->sk, skb);
        else if (ret == NF_DROP)
                kfree_skb(skb);
 
index 334d6e5b77621314f273bc190d5b704a43342c61..59c18804a10a0deda45a33b2ab24fa39b0710b0c 100644 (file)
@@ -336,7 +336,7 @@ void nf_ct_expect_init(struct nf_conntrack_expect *exp, unsigned int class,
 
        exp->tuple.dst.u.all = *dst;
 
-#ifdef CONFIG_NF_NAT_NEEDED
+#if IS_ENABLED(CONFIG_NF_NAT)
        memset(&exp->saved_addr, 0, sizeof(exp->saved_addr));
        memset(&exp->saved_proto, 0, sizeof(exp->saved_proto));
 #endif
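
NF_NAT_NEEDED existed only because a plain #ifdef CONFIG_NF_NAT misses the =m case; IS_ENABLED() covers both built-in and modular. A simplified, compilable model of the macro (the real one in include/linux/kconfig.h composes the same pieces slightly differently):

    #include <stdio.h>

    #define __ARG_PLACEHOLDER_1 0,
    #define __take_second_arg(__ignored, val, ...) val
    #define __is_defined(x)        ___is_defined(x)
    #define ___is_defined(val)     ____is_defined(__ARG_PLACEHOLDER_##val)
    #define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
    #define IS_ENABLED(option) \
        (__is_defined(option) || __is_defined(option##_MODULE))

    #define CONFIG_NF_NAT_MODULE 1  /* pretend NF_NAT=m */

    int main(void)
    {
        printf("IS_ENABLED(CONFIG_NF_NAT) = %d\n", IS_ENABLED(CONFIG_NF_NAT));
        return 0;   /* prints 1: =m counts, unlike a bare #ifdef */
    }
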
index 66c596d287a5dc44cea26680023e8c12798a5261..32fe3060375aad3e7fa4514518a733b9e0cb8591 100644 (file)
@@ -45,7 +45,7 @@
 #include <net/netfilter/nf_conntrack_timestamp.h>
 #include <net/netfilter/nf_conntrack_labels.h>
 #include <net/netfilter/nf_conntrack_synproxy.h>
-#ifdef CONFIG_NF_NAT_NEEDED
+#if IS_ENABLED(CONFIG_NF_NAT)
 #include <net/netfilter/nf_nat.h>
 #include <net/netfilter/nf_nat_helper.h>
 #endif
@@ -655,7 +655,7 @@ static size_t ctnetlink_nlmsg_size(const struct nf_conn *ct)
               + nla_total_size(0) /* CTA_HELP */
               + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
               + ctnetlink_secctx_size(ct)
-#ifdef CONFIG_NF_NAT_NEEDED
+#if IS_ENABLED(CONFIG_NF_NAT)
               + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
               + 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */
 #endif
@@ -1494,7 +1494,7 @@ static int ctnetlink_get_ct_unconfirmed(struct net *net, struct sock *ctnl,
        return -EOPNOTSUPP;
 }
 
-#ifdef CONFIG_NF_NAT_NEEDED
+#if IS_ENABLED(CONFIG_NF_NAT)
 static int
 ctnetlink_parse_nat_setup(struct nf_conn *ct,
                          enum nf_nat_manip_type manip,
@@ -1586,7 +1586,7 @@ ctnetlink_change_status(struct nf_conn *ct, const struct nlattr * const cda[])
 static int
 ctnetlink_setup_nat(struct nf_conn *ct, const struct nlattr * const cda[])
 {
-#ifdef CONFIG_NF_NAT_NEEDED
+#if IS_ENABLED(CONFIG_NF_NAT)
        int ret;
 
        if (!cda[CTA_NAT_DST] && !cda[CTA_NAT_SRC])
@@ -2369,7 +2369,7 @@ ctnetlink_glue_build_size(const struct nf_conn *ct)
               + nla_total_size(0) /* CTA_HELP */
               + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
               + ctnetlink_secctx_size(ct)
-#ifdef CONFIG_NF_NAT_NEEDED
+#if IS_ENABLED(CONFIG_NF_NAT)
               + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
               + 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */
 #endif
@@ -2699,7 +2699,7 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
        struct nf_conn *master = exp->master;
        long timeout = ((long)exp->timeout.expires - (long)jiffies) / HZ;
        struct nf_conn_help *help;
-#ifdef CONFIG_NF_NAT_NEEDED
+#if IS_ENABLED(CONFIG_NF_NAT)
        struct nlattr *nest_parms;
        struct nf_conntrack_tuple nat_tuple = {};
 #endif
@@ -2717,7 +2717,7 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
                                 CTA_EXPECT_MASTER) < 0)
                goto nla_put_failure;
 
-#ifdef CONFIG_NF_NAT_NEEDED
+#if IS_ENABLED(CONFIG_NF_NAT)
        if (!nf_inet_addr_cmp(&exp->saved_addr, &any_addr) ||
            exp->saved_proto.all) {
                nest_parms = nla_nest_start(skb, CTA_EXPECT_NAT | NLA_F_NESTED);
@@ -3180,7 +3180,7 @@ ctnetlink_parse_expect_nat(const struct nlattr *attr,
                           struct nf_conntrack_expect *exp,
                           u_int8_t u3)
 {
-#ifdef CONFIG_NF_NAT_NEEDED
+#if IS_ENABLED(CONFIG_NF_NAT)
        struct nlattr *tb[CTA_EXPECT_NAT_MAX+1];
        struct nf_conntrack_tuple nat_tuple = {};
        int err;
index 39fcc1ed18f3501b3120fc9aeffbe44e27fda933..d5454d1031a3b4aaf0fc1bfe944149acb7f14ab3 100644 (file)
@@ -928,7 +928,7 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int protoff,
                    nfct_help(exp->master)->helper != nfct_help(ct)->helper ||
                    exp->class != class)
                        break;
-#ifdef CONFIG_NF_NAT_NEEDED
+#if IS_ENABLED(CONFIG_NF_NAT)
                if (!direct_rtp &&
                    (!nf_inet_addr_cmp(&exp->saved_addr, &exp->tuple.dst.u3) ||
                     exp->saved_proto.udp.port != exp->tuple.dst.u.udp.port) &&
index 1d291a51cd45b74e5f70b34c304e519f2c41875b..6452550d187fecd919c483cea627ed1adbf274cc 100644 (file)
@@ -235,13 +235,10 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
        if (tuplehash == NULL)
                return NF_ACCEPT;
 
-       outdev = dev_get_by_index_rcu(state->net, tuplehash->tuple.oifidx);
-       if (!outdev)
-               return NF_ACCEPT;
-
        dir = tuplehash->tuple.dir;
        flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
        rt = (struct rtable *)flow->tuplehash[dir].tuple.dst_cache;
+       outdev = rt->dst.dev;
 
        if (unlikely(nf_flow_exceeds_mtu(skb, flow->tuplehash[dir].tuple.mtu)) &&
            (ip_hdr(skb)->frag_off & htons(IP_DF)) != 0)
@@ -452,13 +449,10 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
        if (tuplehash == NULL)
                return NF_ACCEPT;
 
-       outdev = dev_get_by_index_rcu(state->net, tuplehash->tuple.oifidx);
-       if (!outdev)
-               return NF_ACCEPT;
-
        dir = tuplehash->tuple.dir;
        flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
        rt = (struct rt6_info *)flow->tuplehash[dir].tuple.dst_cache;
+       outdev = rt->dst.dev;
 
        if (unlikely(nf_flow_exceeds_mtu(skb, flow->tuplehash[dir].tuple.mtu)))
                return NF_ACCEPT;
index e15779fd58e3a5c8425c0f8ff5296f2bfdf338c8..d6c43902ebd766c1653befa5929659e5c4658606 100644 (file)
@@ -7,9 +7,6 @@
 #include <linux/netdevice.h>
 
 /* nf_queue.c */
-int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
-            const struct nf_hook_entries *entries, unsigned int index,
-            unsigned int verdict);
 void nf_queue_nf_hook_drop(struct net *net);
 
 /* nf_log.c */
index af7dc65377584d26f4b5d98ef55dd06f93d8107d..a9ec49edd7f43bc8a3ed7707cd8e3e0ee4bd8681 100644 (file)
@@ -1009,7 +1009,7 @@ static struct nf_ct_helper_expectfn follow_master_nat = {
        .expectfn       = nf_nat_follow_master,
 };
 
-int nf_nat_register_fn(struct net *net, const struct nf_hook_ops *ops,
+int nf_nat_register_fn(struct net *net, u8 pf, const struct nf_hook_ops *ops,
                       const struct nf_hook_ops *orig_nat_ops, unsigned int ops_count)
 {
        struct nat_net *nat_net = net_generic(net, nat_net_id);
@@ -1019,14 +1019,12 @@ int nf_nat_register_fn(struct net *net, const struct nf_hook_ops *ops,
        struct nf_hook_ops *nat_ops;
        int i, ret;
 
-       if (WARN_ON_ONCE(ops->pf >= ARRAY_SIZE(nat_net->nat_proto_net)))
+       if (WARN_ON_ONCE(pf >= ARRAY_SIZE(nat_net->nat_proto_net)))
                return -EINVAL;
 
-       nat_proto_net = &nat_net->nat_proto_net[ops->pf];
+       nat_proto_net = &nat_net->nat_proto_net[pf];
 
        for (i = 0; i < ops_count; i++) {
-               if (WARN_ON(orig_nat_ops[i].pf != ops->pf))
-                       return -EINVAL;
                if (orig_nat_ops[i].hooknum == hooknum) {
                        hooknum = i;
                        break;
@@ -1086,8 +1084,8 @@ int nf_nat_register_fn(struct net *net, const struct nf_hook_ops *ops,
        return ret;
 }
 
-void nf_nat_unregister_fn(struct net *net, const struct nf_hook_ops *ops,
-                         unsigned int ops_count)
+void nf_nat_unregister_fn(struct net *net, u8 pf, const struct nf_hook_ops *ops,
+                         unsigned int ops_count)
 {
        struct nat_net *nat_net = net_generic(net, nat_net_id);
        struct nf_nat_hooks_net *nat_proto_net;
@@ -1096,10 +1094,10 @@ void nf_nat_unregister_fn(struct net *net, const struct nf_hook_ops *ops,
        int hooknum = ops->hooknum;
        int i;
 
-       if (ops->pf >= ARRAY_SIZE(nat_net->nat_proto_net))
+       if (pf >= ARRAY_SIZE(nat_net->nat_proto_net))
                return;
 
-       nat_proto_net = &nat_net->nat_proto_net[ops->pf];
+       nat_proto_net = &nat_net->nat_proto_net[pf];
 
        mutex_lock(&nf_nat_proto_mutex);
        if (WARN_ON(nat_proto_net->users == 0))
index d85c4d902e7b1deca67975abdf13b3bf8326351f..8e8a65d46345b292c8fcac2fc7fcdfd1be1c5557 100644 (file)
@@ -7,12 +7,10 @@
 #include <linux/netfilter_ipv4.h>
 #include <linux/netfilter_ipv6.h>
 
-#include <net/netfilter/ipv4/nf_nat_masquerade.h>
-#include <net/netfilter/ipv6/nf_nat_masquerade.h>
+#include <net/netfilter/nf_nat_masquerade.h>
 
 static DEFINE_MUTEX(masq_mutex);
-static unsigned int masq_refcnt4 __read_mostly;
-static unsigned int masq_refcnt6 __read_mostly;
+static unsigned int masq_refcnt __read_mostly;
 
 unsigned int
 nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
@@ -137,56 +135,6 @@ static struct notifier_block masq_inet_notifier = {
        .notifier_call  = masq_inet_event,
 };
 
-int nf_nat_masquerade_ipv4_register_notifier(void)
-{
-       int ret = 0;
-
-       mutex_lock(&masq_mutex);
-       if (WARN_ON_ONCE(masq_refcnt4 == UINT_MAX)) {
-               ret = -EOVERFLOW;
-               goto out_unlock;
-       }
-
-       /* check if the notifier was already set */
-       if (++masq_refcnt4 > 1)
-               goto out_unlock;
-
-       /* Register for device down reports */
-       ret = register_netdevice_notifier(&masq_dev_notifier);
-       if (ret)
-               goto err_dec;
-       /* Register IP address change reports */
-       ret = register_inetaddr_notifier(&masq_inet_notifier);
-       if (ret)
-               goto err_unregister;
-
-       mutex_unlock(&masq_mutex);
-       return ret;
-
-err_unregister:
-       unregister_netdevice_notifier(&masq_dev_notifier);
-err_dec:
-       masq_refcnt4--;
-out_unlock:
-       mutex_unlock(&masq_mutex);
-       return ret;
-}
-EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4_register_notifier);
-
-void nf_nat_masquerade_ipv4_unregister_notifier(void)
-{
-       mutex_lock(&masq_mutex);
-       /* check if the notifier still has clients */
-       if (--masq_refcnt4 > 0)
-               goto out_unlock;
-
-       unregister_netdevice_notifier(&masq_dev_notifier);
-       unregister_inetaddr_notifier(&masq_inet_notifier);
-out_unlock:
-       mutex_unlock(&masq_mutex);
-}
-EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4_unregister_notifier);
-
 #if IS_ENABLED(CONFIG_IPV6)
 static atomic_t v6_worker_count __read_mostly;
 
@@ -322,44 +270,68 @@ static struct notifier_block masq_inet6_notifier = {
        .notifier_call  = masq_inet6_event,
 };
 
-int nf_nat_masquerade_ipv6_register_notifier(void)
+static int nf_nat_masquerade_ipv6_register_notifier(void)
+{
+       return register_inet6addr_notifier(&masq_inet6_notifier);
+}
+#else
+static inline int nf_nat_masquerade_ipv6_register_notifier(void) { return 0; }
+#endif
+
+int nf_nat_masquerade_inet_register_notifiers(void)
 {
        int ret = 0;
 
        mutex_lock(&masq_mutex);
-       if (WARN_ON_ONCE(masq_refcnt6 == UINT_MAX)) {
+       if (WARN_ON_ONCE(masq_refcnt == UINT_MAX)) {
                ret = -EOVERFLOW;
                goto out_unlock;
        }
 
-       /* check if the notifier is already set */
-       if (++masq_refcnt6 > 1)
+       /* check if the notifier was already set */
+       if (++masq_refcnt > 1)
                goto out_unlock;
 
-       ret = register_inet6addr_notifier(&masq_inet6_notifier);
+       /* Register for device down reports */
+       ret = register_netdevice_notifier(&masq_dev_notifier);
        if (ret)
                goto err_dec;
+       /* Register IP address change reports */
+       ret = register_inetaddr_notifier(&masq_inet_notifier);
+       if (ret)
+               goto err_unregister;
+
+       ret = nf_nat_masquerade_ipv6_register_notifier();
+       if (ret)
+               goto err_unreg_inet;
 
        mutex_unlock(&masq_mutex);
        return ret;
+err_unreg_inet:
+       unregister_inetaddr_notifier(&masq_inet_notifier);
+err_unregister:
+       unregister_netdevice_notifier(&masq_dev_notifier);
 err_dec:
-       masq_refcnt6--;
+       masq_refcnt--;
 out_unlock:
        mutex_unlock(&masq_mutex);
        return ret;
 }
-EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6_register_notifier);
+EXPORT_SYMBOL_GPL(nf_nat_masquerade_inet_register_notifiers);
 
-void nf_nat_masquerade_ipv6_unregister_notifier(void)
+void nf_nat_masquerade_inet_unregister_notifiers(void)
 {
        mutex_lock(&masq_mutex);
-       /* check if the notifier still has clients */
-       if (--masq_refcnt6 > 0)
+       /* check if the notifiers still have clients */
+       if (--masq_refcnt > 0)
                goto out_unlock;
 
+       unregister_netdevice_notifier(&masq_dev_notifier);
+       unregister_inetaddr_notifier(&masq_inet_notifier);
+#if IS_ENABLED(CONFIG_IPV6)
        unregister_inet6addr_notifier(&masq_inet6_notifier);
+#endif
 out_unlock:
        mutex_unlock(&masq_mutex);
 }
-EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6_unregister_notifier);
-#endif
+EXPORT_SYMBOL_GPL(nf_nat_masquerade_inet_unregister_notifiers);
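
The four per-family notifier entry points collapse into one refcounted pair, so a consumer registers once regardless of family; the nft_masq rework later in this series follows exactly this shape. A hedged module-glue sketch, where my_expr_register()/my_expr_unregister() are placeholders for the module's own registration:

    static int __init my_masq_module_init(void)
    {
        int ret;

        ret = nf_nat_masquerade_inet_register_notifiers();
        if (ret < 0)
            return ret;

        ret = my_expr_register();       /* hypothetical */
        if (ret < 0)
            nf_nat_masquerade_inet_unregister_notifiers();

        return ret;
    }

    static void __exit my_masq_module_exit(void)
    {
        my_expr_unregister();           /* hypothetical */
        nf_nat_masquerade_inet_unregister_notifiers();
    }
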
index 62743da3004fa631e6034dc6eb2ff98fcc1b9cd5..84f5c90a7f21f5eb9c23bed36fd31b3159623618 100644 (file)
@@ -725,7 +725,7 @@ nf_nat_ipv4_local_fn(void *priv, struct sk_buff *skb,
        return ret;
 }
 
-static const struct nf_hook_ops nf_nat_ipv4_ops[] = {
+const struct nf_hook_ops nf_nat_ipv4_ops[] = {
        /* Before packet filtering, change destination */
        {
                .hook           = nf_nat_ipv4_in,
@@ -758,13 +758,14 @@ static const struct nf_hook_ops nf_nat_ipv4_ops[] = {
 
 int nf_nat_ipv4_register_fn(struct net *net, const struct nf_hook_ops *ops)
 {
-       return nf_nat_register_fn(net, ops, nf_nat_ipv4_ops, ARRAY_SIZE(nf_nat_ipv4_ops));
+       return nf_nat_register_fn(net, ops->pf, ops, nf_nat_ipv4_ops,
+                                 ARRAY_SIZE(nf_nat_ipv4_ops));
 }
 EXPORT_SYMBOL_GPL(nf_nat_ipv4_register_fn);
 
 void nf_nat_ipv4_unregister_fn(struct net *net, const struct nf_hook_ops *ops)
 {
-       nf_nat_unregister_fn(net, ops, ARRAY_SIZE(nf_nat_ipv4_ops));
+       nf_nat_unregister_fn(net, ops->pf, ops, ARRAY_SIZE(nf_nat_ipv4_ops));
 }
 EXPORT_SYMBOL_GPL(nf_nat_ipv4_unregister_fn);
 
@@ -925,20 +926,6 @@ nf_nat_ipv6_out(void *priv, struct sk_buff *skb,
        return ret;
 }
 
-static int nat_route_me_harder(struct net *net, struct sk_buff *skb)
-{
-#ifdef CONFIG_IPV6_MODULE
-       const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops();
-
-       if (!v6_ops)
-               return -EHOSTUNREACH;
-
-       return v6_ops->route_me_harder(net, skb);
-#else
-       return ip6_route_me_harder(net, skb);
-#endif
-}
-
 static unsigned int
 nf_nat_ipv6_local_fn(void *priv, struct sk_buff *skb,
                     const struct nf_hook_state *state)
@@ -958,7 +945,7 @@ nf_nat_ipv6_local_fn(void *priv, struct sk_buff *skb,
 
                if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3,
                                      &ct->tuplehash[!dir].tuple.src.u3)) {
-                       err = nat_route_me_harder(state->net, skb);
+                       err = nf_ip6_route_me_harder(state->net, skb);
                        if (err < 0)
                                ret = NF_DROP_ERR(err);
                }
@@ -977,7 +964,7 @@ nf_nat_ipv6_local_fn(void *priv, struct sk_buff *skb,
        return ret;
 }
 
-static const struct nf_hook_ops nf_nat_ipv6_ops[] = {
+const struct nf_hook_ops nf_nat_ipv6_ops[] = {
        /* Before packet filtering, change destination */
        {
                .hook           = nf_nat_ipv6_in,
@@ -1010,14 +997,44 @@ static const struct nf_hook_ops nf_nat_ipv6_ops[] = {
 
 int nf_nat_ipv6_register_fn(struct net *net, const struct nf_hook_ops *ops)
 {
-       return nf_nat_register_fn(net, ops, nf_nat_ipv6_ops,
+       return nf_nat_register_fn(net, ops->pf, ops, nf_nat_ipv6_ops,
                                  ARRAY_SIZE(nf_nat_ipv6_ops));
 }
 EXPORT_SYMBOL_GPL(nf_nat_ipv6_register_fn);
 
 void nf_nat_ipv6_unregister_fn(struct net *net, const struct nf_hook_ops *ops)
 {
-       nf_nat_unregister_fn(net, ops, ARRAY_SIZE(nf_nat_ipv6_ops));
+       nf_nat_unregister_fn(net, ops->pf, ops, ARRAY_SIZE(nf_nat_ipv6_ops));
 }
 EXPORT_SYMBOL_GPL(nf_nat_ipv6_unregister_fn);
 #endif /* CONFIG_IPV6 */
+
+#if defined(CONFIG_NF_TABLES_INET) && IS_ENABLED(CONFIG_NFT_NAT)
+int nf_nat_inet_register_fn(struct net *net, const struct nf_hook_ops *ops)
+{
+       int ret;
+
+       if (WARN_ON_ONCE(ops->pf != NFPROTO_INET))
+               return -EINVAL;
+
+       ret = nf_nat_register_fn(net, NFPROTO_IPV6, ops, nf_nat_ipv6_ops,
+                                ARRAY_SIZE(nf_nat_ipv6_ops));
+       if (ret)
+               return ret;
+
+       ret = nf_nat_register_fn(net, NFPROTO_IPV4, ops, nf_nat_ipv4_ops,
+                                ARRAY_SIZE(nf_nat_ipv4_ops));
+       if (ret)
+               nf_nat_ipv6_unregister_fn(net, ops);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(nf_nat_inet_register_fn);
+
+void nf_nat_inet_unregister_fn(struct net *net, const struct nf_hook_ops *ops)
+{
+       nf_nat_unregister_fn(net, NFPROTO_IPV4, ops, ARRAY_SIZE(nf_nat_ipv4_ops));
+       nf_nat_unregister_fn(net, NFPROTO_IPV6, ops, ARRAY_SIZE(nf_nat_ipv6_ops));
+}
+EXPORT_SYMBOL_GPL(nf_nat_inet_unregister_fn);
+#endif /* NFT INET NAT */
index a36a77bae1d6d2c79459dc72826c066a442f60c3..9dc1d6e04946f59ea34377a14834bd711c4cd092 100644 (file)
@@ -240,6 +240,7 @@ int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(nf_queue);
 
 static unsigned int nf_iterate(struct sk_buff *skb,
                               struct nf_hook_state *state,
index ef7772e976cc802afc64ea25d28f1fbecde773be..e058273c5ddea7dcdd3e87cf88c3410f0812220f 100644 (file)
@@ -53,7 +53,6 @@ static const struct rhashtable_params nft_chain_ht_params = {
        .hashfn                 = nft_chain_hash,
        .obj_hashfn             = nft_chain_hash_obj,
        .obj_cmpfn              = nft_chain_hash_cmp,
-       .locks_mul              = 1,
        .automatic_shrinking    = true,
 };
 
@@ -3194,9 +3193,7 @@ static int nf_msecs_to_jiffies64(const struct nlattr *nla, u64 *result)
 
 static __be64 nf_jiffies64_to_msecs(u64 input)
 {
-       u64 ms = jiffies64_to_nsecs(input);
-
-       return cpu_to_be64(div_u64(ms, NSEC_PER_MSEC));
+       return cpu_to_be64(jiffies64_to_msecs(input));
 }
 
 static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
@@ -3439,8 +3436,7 @@ static int nf_tables_getset(struct net *net, struct sock *nlsk,
        return err;
 }
 
-static int nf_tables_set_desc_parse(const struct nft_ctx *ctx,
-                                   struct nft_set_desc *desc,
+static int nf_tables_set_desc_parse(struct nft_set_desc *desc,
                                    const struct nlattr *nla)
 {
        struct nlattr *da[NFTA_SET_DESC_MAX + 1];
@@ -3566,7 +3562,7 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
                policy = ntohl(nla_get_be32(nla[NFTA_SET_POLICY]));
 
        if (nla[NFTA_SET_DESC] != NULL) {
-               err = nf_tables_set_desc_parse(&ctx, &desc, nla[NFTA_SET_DESC]);
+               err = nf_tables_set_desc_parse(&desc, nla[NFTA_SET_DESC]);
                if (err < 0)
                        return err;
        }
@@ -3786,8 +3782,8 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
 }
 EXPORT_SYMBOL_GPL(nf_tables_bind_set);
 
-void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
-                         struct nft_set_binding *binding, bool event)
+static void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
+                                struct nft_set_binding *binding, bool event)
 {
        list_del_rcu(&binding->list);
 
@@ -3798,7 +3794,6 @@ void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
                                             GFP_KERNEL);
        }
 }
-EXPORT_SYMBOL_GPL(nf_tables_unbind_set);
 
 void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
                              struct nft_set_binding *binding,
@@ -7534,6 +7529,7 @@ static int __init nf_tables_module_init(void)
        if (err < 0)
                goto err5;
 
+       nft_chain_route_init();
        return err;
 err5:
        rhltable_destroy(&nft_objname_ht);
@@ -7553,6 +7549,7 @@ static void __exit nf_tables_module_exit(void)
        nfnetlink_subsys_unregister(&nf_tables_subsys);
        unregister_netdevice_notifier(&nf_tables_flowtable_notifier);
        nft_chain_filter_fini();
+       nft_chain_route_fini();
        unregister_pernet_subsys(&nf_tables_net_ops);
        cancel_work_sync(&trans_destroy_work);
        rcu_barrier();
index 1f1d90c1716b5bd697b18870e0bb0a7f9d7e63ec..7b827bcb412c90c90dcf030bad8f445c59396d08 100644 (file)
@@ -255,9 +255,9 @@ nf_osf_match(const struct sk_buff *skb, u_int8_t family,
 }
 EXPORT_SYMBOL_GPL(nf_osf_match);
 
-const char *nf_osf_find(const struct sk_buff *skb,
-                       const struct list_head *nf_osf_fingers,
-                       const int ttl_check)
+bool nf_osf_find(const struct sk_buff *skb,
+                const struct list_head *nf_osf_fingers,
+                const int ttl_check, struct nf_osf_data *data)
 {
        const struct iphdr *ip = ip_hdr(skb);
        const struct nf_osf_user_finger *f;
@@ -265,24 +265,24 @@ const char *nf_osf_find(const struct sk_buff *skb,
        const struct nf_osf_finger *kf;
        struct nf_osf_hdr_ctx ctx;
        const struct tcphdr *tcp;
-       const char *genre = NULL;
 
        memset(&ctx, 0, sizeof(ctx));
 
        tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts);
        if (!tcp)
-               return NULL;
+               return false;
 
        list_for_each_entry_rcu(kf, &nf_osf_fingers[ctx.df], finger_entry) {
                f = &kf->finger;
                if (!nf_osf_match_one(skb, f, ttl_check, &ctx))
                        continue;
 
-               genre = f->genre;
+               data->genre = f->genre;
+               data->version = f->version;
                break;
        }
 
-       return genre;
+       return true;
 }
 EXPORT_SYMBOL_GPL(nf_osf_find);
 
index ee4852088d509a154091f41e213d3c5a91c094c4..2f89bde3c61cb93690238c8c53ccad3556806417 100644 (file)
@@ -74,6 +74,36 @@ static const struct nft_chain_type nft_chain_nat_ipv6 = {
 };
 #endif
 
+#ifdef CONFIG_NF_TABLES_INET
+static int nft_nat_inet_reg(struct net *net, const struct nf_hook_ops *ops)
+{
+       return nf_nat_inet_register_fn(net, ops);
+}
+
+static void nft_nat_inet_unreg(struct net *net, const struct nf_hook_ops *ops)
+{
+       nf_nat_inet_unregister_fn(net, ops);
+}
+
+static const struct nft_chain_type nft_chain_nat_inet = {
+       .name           = "nat",
+       .type           = NFT_CHAIN_T_NAT,
+       .family         = NFPROTO_INET,
+       .hook_mask      = (1 << NF_INET_PRE_ROUTING) |
+                         (1 << NF_INET_LOCAL_IN) |
+                         (1 << NF_INET_LOCAL_OUT) |
+                         (1 << NF_INET_POST_ROUTING),
+       .hooks          = {
+               [NF_INET_PRE_ROUTING]   = nft_nat_do_chain,
+               [NF_INET_LOCAL_IN]      = nft_nat_do_chain,
+               [NF_INET_LOCAL_OUT]     = nft_nat_do_chain,
+               [NF_INET_POST_ROUTING]  = nft_nat_do_chain,
+       },
+       .ops_register           = nft_nat_inet_reg,
+       .ops_unregister         = nft_nat_inet_unreg,
+};
+#endif
+
 static int __init nft_chain_nat_init(void)
 {
 #ifdef CONFIG_NF_TABLES_IPV6
@@ -82,6 +112,9 @@ static int __init nft_chain_nat_init(void)
 #ifdef CONFIG_NF_TABLES_IPV4
        nft_register_chain_type(&nft_chain_nat_ipv4);
 #endif
+#ifdef CONFIG_NF_TABLES_INET
+       nft_register_chain_type(&nft_chain_nat_inet);
+#endif
 
        return 0;
 }
@@ -94,6 +127,9 @@ static void __exit nft_chain_nat_exit(void)
 #ifdef CONFIG_NF_TABLES_IPV6
        nft_unregister_chain_type(&nft_chain_nat_ipv6);
 #endif
+#ifdef CONFIG_NF_TABLES_INET
+       nft_unregister_chain_type(&nft_chain_nat_inet);
+#endif
 }
 
 module_init(nft_chain_nat_init);
diff --git a/net/netfilter/nft_chain_route.c b/net/netfilter/nft_chain_route.c
new file mode 100644 (file)
index 0000000..8826bbe
--- /dev/null
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/skbuff.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_ipv6.h>
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_ipv4.h>
+#include <net/netfilter/nf_tables_ipv6.h>
+#include <net/route.h>
+#include <net/ip.h>
+
+#ifdef CONFIG_NF_TABLES_IPV4
+static unsigned int nf_route_table_hook4(void *priv,
+                                        struct sk_buff *skb,
+                                        const struct nf_hook_state *state)
+{
+       const struct iphdr *iph;
+       struct nft_pktinfo pkt;
+       __be32 saddr, daddr;
+       unsigned int ret;
+       u32 mark;
+       int err;
+       u8 tos;
+
+       nft_set_pktinfo(&pkt, skb, state);
+       nft_set_pktinfo_ipv4(&pkt, skb);
+
+       mark = skb->mark;
+       iph = ip_hdr(skb);
+       saddr = iph->saddr;
+       daddr = iph->daddr;
+       tos = iph->tos;
+
+       ret = nft_do_chain(&pkt, priv);
+       if (ret == NF_ACCEPT) {
+               iph = ip_hdr(skb);
+
+               if (iph->saddr != saddr ||
+                   iph->daddr != daddr ||
+                   skb->mark != mark ||
+                   iph->tos != tos) {
+                       err = ip_route_me_harder(state->net, skb, RTN_UNSPEC);
+                       if (err < 0)
+                               ret = NF_DROP_ERR(err);
+               }
+       }
+       return ret;
+}
+
+static const struct nft_chain_type nft_chain_route_ipv4 = {
+       .name           = "route",
+       .type           = NFT_CHAIN_T_ROUTE,
+       .family         = NFPROTO_IPV4,
+       .hook_mask      = (1 << NF_INET_LOCAL_OUT),
+       .hooks          = {
+               [NF_INET_LOCAL_OUT]     = nf_route_table_hook4,
+       },
+};
+#endif
+
+#ifdef CONFIG_NF_TABLES_IPV6
+static unsigned int nf_route_table_hook6(void *priv,
+                                        struct sk_buff *skb,
+                                        const struct nf_hook_state *state)
+{
+       struct in6_addr saddr, daddr;
+       struct nft_pktinfo pkt;
+       u32 mark, flowlabel;
+       unsigned int ret;
+       u8 hop_limit;
+       int err;
+
+       nft_set_pktinfo(&pkt, skb, state);
+       nft_set_pktinfo_ipv6(&pkt, skb);
+
+       /* save source/dest address, mark, hoplimit, flowlabel, priority */
+       memcpy(&saddr, &ipv6_hdr(skb)->saddr, sizeof(saddr));
+       memcpy(&daddr, &ipv6_hdr(skb)->daddr, sizeof(daddr));
+       mark = skb->mark;
+       hop_limit = ipv6_hdr(skb)->hop_limit;
+
+       /* flowlabel and prio (includes version, which shouldn't change either) */
+       flowlabel = *((u32 *)ipv6_hdr(skb));
+
+       ret = nft_do_chain(&pkt, priv);
+       if (ret == NF_ACCEPT &&
+           (memcmp(&ipv6_hdr(skb)->saddr, &saddr, sizeof(saddr)) ||
+            memcmp(&ipv6_hdr(skb)->daddr, &daddr, sizeof(daddr)) ||
+            skb->mark != mark ||
+            ipv6_hdr(skb)->hop_limit != hop_limit ||
+            flowlabel != *((u32 *)ipv6_hdr(skb)))) {
+               err = nf_ip6_route_me_harder(state->net, skb);
+               if (err < 0)
+                       ret = NF_DROP_ERR(err);
+       }
+
+       return ret;
+}
+
+static const struct nft_chain_type nft_chain_route_ipv6 = {
+       .name           = "route",
+       .type           = NFT_CHAIN_T_ROUTE,
+       .family         = NFPROTO_IPV6,
+       .hook_mask      = (1 << NF_INET_LOCAL_OUT),
+       .hooks          = {
+               [NF_INET_LOCAL_OUT]     = nf_route_table_hook6,
+       },
+};
+#endif
+
+#ifdef CONFIG_NF_TABLES_INET
+static unsigned int nf_route_table_inet(void *priv,
+                                       struct sk_buff *skb,
+                                       const struct nf_hook_state *state)
+{
+       struct nft_pktinfo pkt;
+
+       switch (state->pf) {
+       case NFPROTO_IPV4:
+               return nf_route_table_hook4(priv, skb, state);
+       case NFPROTO_IPV6:
+               return nf_route_table_hook6(priv, skb, state);
+       default:
+               nft_set_pktinfo(&pkt, skb, state);
+               break;
+       }
+
+       return nft_do_chain(&pkt, priv);
+}
+
+static const struct nft_chain_type nft_chain_route_inet = {
+       .name           = "route",
+       .type           = NFT_CHAIN_T_ROUTE,
+       .family         = NFPROTO_INET,
+       .hook_mask      = (1 << NF_INET_LOCAL_OUT),
+       .hooks          = {
+               [NF_INET_LOCAL_OUT]     = nf_route_table_inet,
+       },
+};
+#endif
+
+void __init nft_chain_route_init(void)
+{
+#ifdef CONFIG_NF_TABLES_IPV6
+       nft_register_chain_type(&nft_chain_route_ipv6);
+#endif
+#ifdef CONFIG_NF_TABLES_IPV4
+       nft_register_chain_type(&nft_chain_route_ipv4);
+#endif
+#ifdef CONFIG_NF_TABLES_INET
+       nft_register_chain_type(&nft_chain_route_inet);
+#endif
+}
+
+void __exit nft_chain_route_fini(void)
+{
+#ifdef CONFIG_NF_TABLES_IPV6
+       nft_unregister_chain_type(&nft_chain_route_ipv6);
+#endif
+#ifdef CONFIG_NF_TABLES_IPV4
+       nft_unregister_chain_type(&nft_chain_route_ipv4);
+#endif
+#ifdef CONFIG_NF_TABLES_INET
+       nft_unregister_chain_type(&nft_chain_route_inet);
+#endif
+}
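
Note the flowlabel trick in nf_route_table_hook6() above: the first 32-bit word of the IPv6 header packs version, traffic class and flow label, so saving and re-comparing that single word catches a change to any of the three. A userspace sketch of the layout (host order, as after ntohl()):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* 4-bit version | 8-bit traffic class | 20-bit flow label */
        uint32_t first_word = 0x600212d6;   /* example value */

        printf("version %u, tclass %u, flowlabel 0x%05x\n",
               first_word >> 28,
               (first_word >> 20) & 0xff,
               first_word & 0xfffff);
        return 0;
    }
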
index bee156eaa4008202c9fc4ebc1548e98dbd881d43..86fd90085eafa4fc511929ce9c3a989b0469e50b 100644 (file)
@@ -14,8 +14,7 @@
 #include <linux/netfilter/nf_tables.h>
 #include <net/netfilter/nf_tables.h>
 #include <net/netfilter/nf_nat.h>
-#include <net/netfilter/ipv4/nf_nat_masquerade.h>
-#include <net/netfilter/ipv6/nf_nat_masquerade.h>
+#include <net/netfilter/nf_nat_masquerade.h>
 
 struct nft_masq {
        u32                     flags;
@@ -196,28 +195,73 @@ static struct nft_expr_type nft_masq_ipv6_type __read_mostly = {
 
 static int __init nft_masq_module_init_ipv6(void)
 {
-       int ret = nft_register_expr(&nft_masq_ipv6_type);
-
-       if (ret)
-               return ret;
-
-       ret = nf_nat_masquerade_ipv6_register_notifier();
-       if (ret < 0)
-               nft_unregister_expr(&nft_masq_ipv6_type);
-
-       return ret;
+       return nft_register_expr(&nft_masq_ipv6_type);
 }
 
 static void nft_masq_module_exit_ipv6(void)
 {
        nft_unregister_expr(&nft_masq_ipv6_type);
-       nf_nat_masquerade_ipv6_unregister_notifier();
 }
 #else
 static inline int nft_masq_module_init_ipv6(void) { return 0; }
 static inline void nft_masq_module_exit_ipv6(void) {}
 #endif
 
+#ifdef CONFIG_NF_TABLES_INET
+static void nft_masq_inet_eval(const struct nft_expr *expr,
+                              struct nft_regs *regs,
+                              const struct nft_pktinfo *pkt)
+{
+       switch (nft_pf(pkt)) {
+       case NFPROTO_IPV4:
+               return nft_masq_ipv4_eval(expr, regs, pkt);
+       case NFPROTO_IPV6:
+               return nft_masq_ipv6_eval(expr, regs, pkt);
+       }
+
+       WARN_ON_ONCE(1);
+}
+
+static void
+nft_masq_inet_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
+{
+       nf_ct_netns_put(ctx->net, NFPROTO_INET);
+}
+
+static struct nft_expr_type nft_masq_inet_type;
+static const struct nft_expr_ops nft_masq_inet_ops = {
+       .type           = &nft_masq_inet_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_masq)),
+       .eval           = nft_masq_inet_eval,
+       .init           = nft_masq_init,
+       .destroy        = nft_masq_inet_destroy,
+       .dump           = nft_masq_dump,
+       .validate       = nft_masq_validate,
+};
+
+static struct nft_expr_type nft_masq_inet_type __read_mostly = {
+       .family         = NFPROTO_INET,
+       .name           = "masq",
+       .ops            = &nft_masq_inet_ops,
+       .policy         = nft_masq_policy,
+       .maxattr        = NFTA_MASQ_MAX,
+       .owner          = THIS_MODULE,
+};
+
+static int __init nft_masq_module_init_inet(void)
+{
+       return nft_register_expr(&nft_masq_inet_type);
+}
+
+static void nft_masq_module_exit_inet(void)
+{
+       nft_unregister_expr(&nft_masq_inet_type);
+}
+#else
+static inline int nft_masq_module_init_inet(void) { return 0; }
+static inline void nft_masq_module_exit_inet(void) {}
+#endif
+
 static int __init nft_masq_module_init(void)
 {
        int ret;
@@ -226,15 +270,23 @@ static int __init nft_masq_module_init(void)
        if (ret < 0)
                return ret;
 
+       ret = nft_masq_module_init_inet();
+       if (ret < 0) {
+               nft_masq_module_exit_ipv6();
+               return ret;
+       }
+
        ret = nft_register_expr(&nft_masq_ipv4_type);
        if (ret < 0) {
+               nft_masq_module_exit_inet();
                nft_masq_module_exit_ipv6();
                return ret;
        }
 
-       ret = nf_nat_masquerade_ipv4_register_notifier();
+       ret = nf_nat_masquerade_inet_register_notifiers();
        if (ret < 0) {
                nft_masq_module_exit_ipv6();
+               nft_masq_module_exit_inet();
                nft_unregister_expr(&nft_masq_ipv4_type);
                return ret;
        }
@@ -245,8 +297,9 @@ static int __init nft_masq_module_init(void)
 static void __exit nft_masq_module_exit(void)
 {
        nft_masq_module_exit_ipv6();
+       nft_masq_module_exit_inet();
        nft_unregister_expr(&nft_masq_ipv4_type);
-       nf_nat_masquerade_ipv4_unregister_notifier();
+       nf_nat_masquerade_inet_unregister_notifiers();
 }
 
 module_init(nft_masq_module_init);
index e93aed9bda887dff7db5c0312aa6ad66d388fdcf..d90d421826aa28546d3c6f2a1b0e1b92f9b1caca 100644 (file)
@@ -140,7 +140,7 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
                return -EINVAL;
 
        family = ntohl(nla_get_be32(tb[NFTA_NAT_FAMILY]));
-       if (family != ctx->family)
+       if (ctx->family != NFPROTO_INET && ctx->family != family)
                return -EOPNOTSUPP;
 
        switch (family) {
@@ -278,13 +278,67 @@ static struct nft_expr_type nft_nat_type __read_mostly = {
        .owner          = THIS_MODULE,
 };
 
+#ifdef CONFIG_NF_TABLES_INET
+static void nft_nat_inet_eval(const struct nft_expr *expr,
+                             struct nft_regs *regs,
+                             const struct nft_pktinfo *pkt)
+{
+       const struct nft_nat *priv = nft_expr_priv(expr);
+
+       if (priv->family == nft_pf(pkt))
+               nft_nat_eval(expr, regs, pkt);
+}
+
+static const struct nft_expr_ops nft_nat_inet_ops = {
+       .type           = &nft_nat_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_nat)),
+       .eval           = nft_nat_inet_eval,
+       .init           = nft_nat_init,
+       .destroy        = nft_nat_destroy,
+       .dump           = nft_nat_dump,
+       .validate       = nft_nat_validate,
+};
+
+static struct nft_expr_type nft_inet_nat_type __read_mostly = {
+       .name           = "nat",
+       .family         = NFPROTO_INET,
+       .ops            = &nft_nat_inet_ops,
+       .policy         = nft_nat_policy,
+       .maxattr        = NFTA_NAT_MAX,
+       .owner          = THIS_MODULE,
+};
+
+static int nft_nat_inet_module_init(void)
+{
+       return nft_register_expr(&nft_inet_nat_type);
+}
+
+static void nft_nat_inet_module_exit(void)
+{
+       nft_unregister_expr(&nft_inet_nat_type);
+}
+#else
+static int nft_nat_inet_module_init(void) { return 0; }
+static void nft_nat_inet_module_exit(void) { }
+#endif
+
 static int __init nft_nat_module_init(void)
 {
-       return nft_register_expr(&nft_nat_type);
+       int ret = nft_nat_inet_module_init();
+
+       if (ret)
+               return ret;
+
+       ret = nft_register_expr(&nft_nat_type);
+       if (ret)
+               nft_nat_inet_module_exit();
+
+       return ret;
 }
 
 static void __exit nft_nat_module_exit(void)
 {
+       nft_nat_inet_module_exit();
        nft_unregister_expr(&nft_nat_type);
 }
 
index b13618c764ec296377778ee405b9067515ada25a..87b60d6617ef1d55703f01cbf7a7a849ef515645 100644 (file)
@@ -7,11 +7,13 @@
 struct nft_osf {
        enum nft_registers      dreg:8;
        u8                      ttl;
+       u32                     flags;
 };
 
 static const struct nla_policy nft_osf_policy[NFTA_OSF_MAX + 1] = {
        [NFTA_OSF_DREG]         = { .type = NLA_U32 },
        [NFTA_OSF_TTL]          = { .type = NLA_U8 },
+       [NFTA_OSF_FLAGS]        = { .type = NLA_U32 },
 };
 
 static void nft_osf_eval(const struct nft_expr *expr, struct nft_regs *regs,
@@ -20,9 +22,10 @@ static void nft_osf_eval(const struct nft_expr *expr, struct nft_regs *regs,
        struct nft_osf *priv = nft_expr_priv(expr);
        u32 *dest = &regs->data[priv->dreg];
        struct sk_buff *skb = pkt->skb;
+       char os_match[NFT_OSF_MAXGENRELEN + 1];
        const struct tcphdr *tcp;
+       struct nf_osf_data data;
        struct tcphdr _tcph;
-       const char *os_name;
 
        tcp = skb_header_pointer(skb, ip_hdrlen(skb),
                                 sizeof(struct tcphdr), &_tcph);
@@ -35,11 +38,17 @@ static void nft_osf_eval(const struct nft_expr *expr, struct nft_regs *regs,
                return;
        }
 
-       os_name = nf_osf_find(skb, nf_osf_fingers, priv->ttl);
-       if (!os_name)
+       if (!nf_osf_find(skb, nf_osf_fingers, priv->ttl, &data)) {
                strncpy((char *)dest, "unknown", NFT_OSF_MAXGENRELEN);
-       else
-               strncpy((char *)dest, os_name, NFT_OSF_MAXGENRELEN);
+       } else {
+               if (priv->flags & NFT_OSF_F_VERSION)
+                       snprintf(os_match, NFT_OSF_MAXGENRELEN, "%s:%s",
+                                data.genre, data.version);
+               else
+                       strlcpy(os_match, data.genre, NFT_OSF_MAXGENRELEN);
+
+               strncpy((char *)dest, os_match, NFT_OSF_MAXGENRELEN);
+       }
 }
 
 static int nft_osf_init(const struct nft_ctx *ctx,
@@ -47,6 +56,7 @@ static int nft_osf_init(const struct nft_ctx *ctx,
                        const struct nlattr * const tb[])
 {
        struct nft_osf *priv = nft_expr_priv(expr);
+       u32 flags;
        int err;
        u8 ttl;
 
@@ -57,6 +67,13 @@ static int nft_osf_init(const struct nft_ctx *ctx,
                priv->ttl = ttl;
        }
 
+       if (tb[NFTA_OSF_FLAGS]) {
+               flags = ntohl(nla_get_be32(tb[NFTA_OSF_FLAGS]));
+               if (flags != NFT_OSF_F_VERSION)
+                       return -EINVAL;
+               priv->flags = flags;
+       }
+
        priv->dreg = nft_parse_register(tb[NFTA_OSF_DREG]);
        err = nft_validate_register_store(ctx, priv->dreg, NULL,
                                          NFT_DATA_VALUE, NFT_OSF_MAXGENRELEN);
@@ -73,6 +90,9 @@ static int nft_osf_dump(struct sk_buff *skb, const struct nft_expr *expr)
        if (nla_put_u8(skb, NFTA_OSF_TTL, priv->ttl))
                goto nla_put_failure;
 
+       if (nla_put_be32(skb, NFTA_OSF_FLAGS, ntohl(priv->flags)))
+               goto nla_put_failure;
+
        if (nft_dump_register(skb, NFTA_OSF_DREG, priv->dreg))
                goto nla_put_failure;
 
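The osf hunks add a userspace-visible flag: with NFT_OSF_F_VERSION set, the match writes "genre:version" into the register instead of the bare genre, and init rejects any other flag bit. A small sketch of that validate-and-format logic; GENRE_LEN and F_VERSION are illustrative values rather than the kernel constants, and the netlink byte-order handling (nla_get_be32()/ntohl()) is omitted:

```c
/* Sketch of the nft_osf flag validation and result formatting above. */
#include <errno.h>
#include <stdio.h>

#define GENRE_LEN  16		/* stands in for NFT_OSF_MAXGENRELEN */
#define F_VERSION  (1 << 0)	/* stands in for NFT_OSF_F_VERSION */

static int parse_flags(unsigned int flags, unsigned int *out)
{
	/* only the version flag is defined; reject anything else */
	if (flags != F_VERSION)
		return -EINVAL;
	*out = flags;
	return 0;
}

static void format_match(char *dest, size_t len, unsigned int flags,
			 const char *genre, const char *version)
{
	if (flags & F_VERSION)
		snprintf(dest, len, "%s:%s", genre, version);
	else
		snprintf(dest, len, "%s", genre);
}

int main(void)
{
	char buf[GENRE_LEN + 1];
	unsigned int flags = 0;

	if (parse_flags(F_VERSION, &flags) == 0) {
		format_match(buf, sizeof(buf), flags, "Linux", "3.x");
		printf("%s\n", buf);	/* prints "Linux:3.x" */
	}
	return 0;
}
```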
index a340cd8a751b483766e4ed7274ce0fb2c2b193e2..da74fdc4a6844f2294ba952188469b0cee64aefd 100644 (file)
@@ -82,7 +82,7 @@ static int nft_redir_init(const struct nft_ctx *ctx,
        return nf_ct_netns_get(ctx->net, ctx->family);
 }
 
-int nft_redir_dump(struct sk_buff *skb, const struct nft_expr *expr)
+static int nft_redir_dump(struct sk_buff *skb, const struct nft_expr *expr)
 {
        const struct nft_redir *priv = nft_expr_priv(expr);
 
@@ -202,6 +202,55 @@ static struct nft_expr_type nft_redir_ipv6_type __read_mostly = {
 };
 #endif
 
+#ifdef CONFIG_NF_TABLES_INET
+static void nft_redir_inet_eval(const struct nft_expr *expr,
+                               struct nft_regs *regs,
+                               const struct nft_pktinfo *pkt)
+{
+       switch (nft_pf(pkt)) {
+       case NFPROTO_IPV4:
+               return nft_redir_ipv4_eval(expr, regs, pkt);
+       case NFPROTO_IPV6:
+               return nft_redir_ipv6_eval(expr, regs, pkt);
+       }
+
+       WARN_ON_ONCE(1);
+}
+
+static void
+nft_redir_inet_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
+{
+       nf_ct_netns_put(ctx->net, NFPROTO_INET);
+}
+
+static struct nft_expr_type nft_redir_inet_type;
+static const struct nft_expr_ops nft_redir_inet_ops = {
+       .type           = &nft_redir_inet_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_redir)),
+       .eval           = nft_redir_inet_eval,
+       .init           = nft_redir_init,
+       .destroy        = nft_redir_inet_destroy,
+       .dump           = nft_redir_dump,
+       .validate       = nft_redir_validate,
+};
+
+static struct nft_expr_type nft_redir_inet_type __read_mostly = {
+       .family         = NFPROTO_INET,
+       .name           = "redir",
+       .ops            = &nft_redir_inet_ops,
+       .policy         = nft_redir_policy,
+       .maxattr        = NFTA_MASQ_MAX,
+       .owner          = THIS_MODULE,
+};
+
+static int __init nft_redir_module_init_inet(void)
+{
+       return nft_register_expr(&nft_redir_inet_type);
+}
+#else
+static inline int nft_redir_module_init_inet(void) { return 0; }
+#endif
+
 static int __init nft_redir_module_init(void)
 {
        int ret = nft_register_expr(&nft_redir_ipv4_type);
@@ -217,6 +266,15 @@ static int __init nft_redir_module_init(void)
        }
 #endif
 
+       ret = nft_redir_module_init_inet();
+       if (ret < 0) {
+               nft_unregister_expr(&nft_redir_ipv4_type);
+#ifdef CONFIG_NF_TABLES_IPV6
+               nft_unregister_expr(&nft_redir_ipv6_type);
+#endif
+               return ret;
+       }
+
        return ret;
 }
 
@@ -226,6 +284,9 @@ static void __exit nft_redir_module_exit(void)
 #ifdef CONFIG_NF_TABLES_IPV6
        nft_unregister_expr(&nft_redir_ipv6_type);
 #endif
+#ifdef CONFIG_NF_TABLES_INET
+       nft_unregister_expr(&nft_redir_inet_type);
+#endif
 }
 
 module_init(nft_redir_module_init);
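nft_redir_inet_eval() above is a thin dispatcher: an inet chain can see both IPv4 and IPv6 packets, so the expression picks the per-family handler at run time and warns on anything else. A toy model of that dispatch, with illustrative family constants and handler names:

```c
/* Sketch of the per-family dispatch in nft_redir_inet_eval() above. */
#include <assert.h>
#include <stdio.h>

enum family { FAM_IPV4, FAM_IPV6, FAM_OTHER };

static void eval_v4(void) { puts("redirect via IPv4 path"); }
static void eval_v6(void) { puts("redirect via IPv6 path"); }

static void eval_inet(enum family pf)
{
	switch (pf) {
	case FAM_IPV4:
		eval_v4();
		return;
	case FAM_IPV6:
		eval_v6();
		return;
	default:
		break;
	}
	/* an inet chain should only ever see v4 or v6 packets */
	assert(0 && "unexpected family");
}

int main(void)
{
	eval_inet(FAM_IPV4);
	eval_inet(FAM_IPV6);
	return 0;
}
```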
index e5e5c64df8d1a94b4e448c2a146c7c112f2e0e58..0a6656ed153430a5b585675a760e4c0c322ab8c8 100644 (file)
@@ -227,7 +227,7 @@ xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision)
 EXPORT_SYMBOL_GPL(xt_request_find_match);
 
 /* Find target, grabs ref.  Returns ERR_PTR() on error. */
-struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
+static struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
 {
        struct xt_target *t;
        int err = -ENOENT;
@@ -255,7 +255,6 @@ struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
 
        return ERR_PTR(err);
 }
-EXPORT_SYMBOL(xt_find_target);
 
 struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
 {
similarity index 52%
rename from net/ipv4/netfilter/ipt_MASQUERADE.c
rename to net/netfilter/xt_MASQUERADE.c
index fd3f9e8a74daf4954d675eaf2cc381196facc4f4..ece20d832adc0adb994a92df9cf323feaebb9e81 100644 (file)
@@ -9,20 +9,10 @@
  * published by the Free Software Foundation.
  */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/types.h>
-#include <linux/inetdevice.h>
-#include <linux/ip.h>
-#include <linux/timer.h>
 #include <linux/module.h>
-#include <linux/netfilter.h>
-#include <net/protocol.h>
-#include <net/ip.h>
-#include <net/checksum.h>
-#include <net/route.h>
-#include <linux/netfilter_ipv4.h>
 #include <linux/netfilter/x_tables.h>
 #include <net/netfilter/nf_nat.h>
-#include <net/netfilter/ipv4/nf_nat_masquerade.h>
+#include <net/netfilter/nf_nat_masquerade.h>
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
@@ -64,38 +54,78 @@ static void masquerade_tg_destroy(const struct xt_tgdtor_param *par)
        nf_ct_netns_put(par->net, par->family);
 }
 
-static struct xt_target masquerade_tg_reg __read_mostly = {
-       .name           = "MASQUERADE",
-       .family         = NFPROTO_IPV4,
-       .target         = masquerade_tg,
-       .targetsize     = sizeof(struct nf_nat_ipv4_multi_range_compat),
-       .table          = "nat",
-       .hooks          = 1 << NF_INET_POST_ROUTING,
-       .checkentry     = masquerade_tg_check,
-       .destroy        = masquerade_tg_destroy,
-       .me             = THIS_MODULE,
+#if IS_ENABLED(CONFIG_IPV6)
+static unsigned int
+masquerade_tg6(struct sk_buff *skb, const struct xt_action_param *par)
+{
+       return nf_nat_masquerade_ipv6(skb, par->targinfo, xt_out(par));
+}
+
+static int masquerade_tg6_checkentry(const struct xt_tgchk_param *par)
+{
+       const struct nf_nat_range2 *range = par->targinfo;
+
+       if (range->flags & NF_NAT_RANGE_MAP_IPS)
+               return -EINVAL;
+
+       return nf_ct_netns_get(par->net, par->family);
+}
+#endif
+
+static struct xt_target masquerade_tg_reg[] __read_mostly = {
+       {
+#if IS_ENABLED(CONFIG_IPV6)
+               .name           = "MASQUERADE",
+               .family         = NFPROTO_IPV6,
+               .target         = masquerade_tg6,
+               .targetsize     = sizeof(struct nf_nat_range),
+               .table          = "nat",
+               .hooks          = 1 << NF_INET_POST_ROUTING,
+               .checkentry     = masquerade_tg6_checkentry,
+               .destroy        = masquerade_tg_destroy,
+               .me             = THIS_MODULE,
+       }, {
+#endif
+               .name           = "MASQUERADE",
+               .family         = NFPROTO_IPV4,
+               .target         = masquerade_tg,
+               .targetsize     = sizeof(struct nf_nat_ipv4_multi_range_compat),
+               .table          = "nat",
+               .hooks          = 1 << NF_INET_POST_ROUTING,
+               .checkentry     = masquerade_tg_check,
+               .destroy        = masquerade_tg_destroy,
+               .me             = THIS_MODULE,
+       }
 };
 
 static int __init masquerade_tg_init(void)
 {
        int ret;
 
-       ret = xt_register_target(&masquerade_tg_reg);
+       ret = xt_register_targets(masquerade_tg_reg,
+                                 ARRAY_SIZE(masquerade_tg_reg));
        if (ret)
                return ret;
 
-       ret = nf_nat_masquerade_ipv4_register_notifier();
-       if (ret)
-               xt_unregister_target(&masquerade_tg_reg);
+       ret = nf_nat_masquerade_inet_register_notifiers();
+       if (ret) {
+               xt_unregister_targets(masquerade_tg_reg,
+                                     ARRAY_SIZE(masquerade_tg_reg));
+               return ret;
+       }
 
        return ret;
 }
 
 static void __exit masquerade_tg_exit(void)
 {
-       xt_unregister_target(&masquerade_tg_reg);
-       nf_nat_masquerade_ipv4_unregister_notifier();
+       xt_unregister_targets(masquerade_tg_reg, ARRAY_SIZE(masquerade_tg_reg));
+       nf_nat_masquerade_inet_unregister_notifiers();
 }
 
 module_init(masquerade_tg_init);
 module_exit(masquerade_tg_exit);
+#if IS_ENABLED(CONFIG_IPV6)
+MODULE_ALIAS("ip6t_MASQUERADE");
+#endif
+MODULE_ALIAS("ipt_MASQUERADE");
index f28e937320a3b453371143a035e6967482d17cd4..216ab915dd54d4ad7f205aac9f0ab3e3291a2684 100644 (file)
@@ -988,7 +988,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
        struct netlink_sock *nlk = nlk_sk(sk);
        struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
        int err = 0;
-       unsigned long groups = nladdr->nl_groups;
+       unsigned long groups;
        bool bound;
 
        if (addr_len < sizeof(struct sockaddr_nl))
@@ -996,6 +996,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
 
        if (nladdr->nl_family != AF_NETLINK)
                return -EINVAL;
+       groups = nladdr->nl_groups;
 
        /* Only superuser is allowed to listen multicasts */
        if (groups) {
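The netlink_bind() change is a classic validate-before-read fix: nl_groups was previously loaded from the caller's sockaddr before the length check, so a short addr_len let the function act on uninitialized stack bytes. A userspace model of the corrected ordering; the struct and constant are illustrative, not the real uapi definitions:

```c
/* Sketch of the validate-then-read ordering in netlink_bind() above. */
#include <errno.h>
#include <stdio.h>

struct nladdr { unsigned short family; unsigned long groups; };
#define AF_NL 16

static int do_bind(const void *addr, size_t addr_len)
{
	const struct nladdr *nla = addr;
	unsigned long groups;

	if (addr_len < sizeof(*nla))
		return -EINVAL;
	if (nla->family != AF_NL)
		return -EINVAL;
	groups = nla->groups;	/* safe: only read after validation */

	printf("binding to groups 0x%lx\n", groups);
	return 0;
}

int main(void)
{
	struct nladdr a = { .family = AF_NL, .groups = 0x3 };

	do_bind(&a, sizeof(a));
	return do_bind(&a, 1) == -EINVAL ? 0 : 1; /* short length rejected */
}
```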
index 1d3144d1990352f4eb8942220e03e225e01af19f..71ffd1a6dc7c6063c00f4c82f985fe9fc0d80dc0 100644 (file)
@@ -1392,18 +1392,22 @@ static int __init nr_proto_init(void)
        int i;
        int rc = proto_register(&nr_proto, 0);
 
-       if (rc != 0)
-               goto out;
+       if (rc)
+               return rc;
 
        if (nr_ndevs > 0x7fffffff/sizeof(struct net_device *)) {
-               printk(KERN_ERR "NET/ROM: nr_proto_init - nr_ndevs parameter to large\n");
-               return -1;
+               pr_err("NET/ROM: %s - nr_ndevs parameter too large\n",
+                      __func__);
+               rc = -EINVAL;
+               goto unregister_proto;
        }
 
        dev_nr = kcalloc(nr_ndevs, sizeof(struct net_device *), GFP_KERNEL);
-       if (dev_nr == NULL) {
-               printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device array\n");
-               return -1;
+       if (!dev_nr) {
+               pr_err("NET/ROM: %s - unable to allocate device array\n",
+                      __func__);
+               rc = -ENOMEM;
+               goto unregister_proto;
        }
 
        for (i = 0; i < nr_ndevs; i++) {
@@ -1413,13 +1417,13 @@ static int __init nr_proto_init(void)
                sprintf(name, "nr%d", i);
                dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, nr_setup);
                if (!dev) {
-                       printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device structure\n");
+                       rc = -ENOMEM;
                        goto fail;
                }
 
                dev->base_addr = i;
-               if (register_netdev(dev)) {
-                       printk(KERN_ERR "NET/ROM: nr_proto_init - unable to register network device\n");
+               rc = register_netdev(dev);
+               if (rc) {
                        free_netdev(dev);
                        goto fail;
                }
@@ -1427,36 +1431,64 @@ static int __init nr_proto_init(void)
                dev_nr[i] = dev;
        }
 
-       if (sock_register(&nr_family_ops)) {
-               printk(KERN_ERR "NET/ROM: nr_proto_init - unable to register socket family\n");
+       rc = sock_register(&nr_family_ops);
+       if (rc)
                goto fail;
-       }
 
-       register_netdevice_notifier(&nr_dev_notifier);
+       rc = register_netdevice_notifier(&nr_dev_notifier);
+       if (rc)
+               goto out_sock;
 
        ax25_register_pid(&nr_pid);
        ax25_linkfail_register(&nr_linkfail_notifier);
 
 #ifdef CONFIG_SYSCTL
-       nr_register_sysctl();
+       rc = nr_register_sysctl();
+       if (rc)
+               goto out_sysctl;
 #endif
 
        nr_loopback_init();
 
-       proc_create_seq("nr", 0444, init_net.proc_net, &nr_info_seqops);
-       proc_create_seq("nr_neigh", 0444, init_net.proc_net, &nr_neigh_seqops);
-       proc_create_seq("nr_nodes", 0444, init_net.proc_net, &nr_node_seqops);
-out:
-       return rc;
+       rc = -ENOMEM;
+       if (!proc_create_seq("nr", 0444, init_net.proc_net, &nr_info_seqops))
+               goto proc_remove1;
+       if (!proc_create_seq("nr_neigh", 0444, init_net.proc_net,
+                            &nr_neigh_seqops))
+               goto proc_remove2;
+       if (!proc_create_seq("nr_nodes", 0444, init_net.proc_net,
+                            &nr_node_seqops))
+               goto proc_remove3;
+
+       return 0;
+
+proc_remove3:
+       remove_proc_entry("nr_neigh", init_net.proc_net);
+proc_remove2:
+       remove_proc_entry("nr", init_net.proc_net);
+proc_remove1:
+
+       nr_loopback_clear();
+       nr_rt_free();
+
+#ifdef CONFIG_SYSCTL
+       nr_unregister_sysctl();
+out_sysctl:
+#endif
+       ax25_linkfail_release(&nr_linkfail_notifier);
+       ax25_protocol_release(AX25_P_NETROM);
+       unregister_netdevice_notifier(&nr_dev_notifier);
+out_sock:
+       sock_unregister(PF_NETROM);
 fail:
        while (--i >= 0) {
                unregister_netdev(dev_nr[i]);
                free_netdev(dev_nr[i]);
        }
        kfree(dev_nr);
+unregister_proto:
        proto_unregister(&nr_proto);
-       rc = -1;
-       goto out;
+       return rc;
 }
 
 module_init(nr_proto_init);
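The nr_proto_init() rewrite replaces printk-and-return -1 with proper error codes and a goto unwind ladder: every setup step gets a label, and a failure jumps to the label that undoes everything done so far, in reverse. A compact sketch of the ladder; step() and the step names are hypothetical:

```c
/* Sketch of the goto unwind ladder adopted by nr_proto_init() above. */
#include <stdio.h>

static int step(const char *what, int fail)
{
	if (fail) {
		printf("%s failed\n", what);
		return -1;
	}
	printf("%s ok\n", what);
	return 0;
}

static int proto_init(void)
{
	int rc;

	rc = step("register proto", 0);
	if (rc)
		return rc;		/* nothing to undo yet */

	rc = step("register devices", 0);
	if (rc)
		goto unregister_proto;

	rc = step("register socket family", 1);	/* simulated failure */
	if (rc)
		goto free_devices;

	return 0;

free_devices:
	puts("undo: free devices");
unregister_proto:
	puts("undo: unregister proto");
	return rc;
}

int main(void)
{
	proto_init();	/* prints the unwind sequence for the failed step */
	return 0;
}
```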
index 215ad22a96476ebb9d30919e99d67bda8e1ce88f..93d13f01998133a2b6c6b3256bb19679f14cea65 100644 (file)
@@ -70,7 +70,7 @@ static void nr_loopback_timer(struct timer_list *unused)
        }
 }
 
-void __exit nr_loopback_clear(void)
+void nr_loopback_clear(void)
 {
        del_timer_sync(&loopback_timer);
        skb_queue_purge(&loopback_queue);
index 6485f593e2f09bc3f215e2ad2c638154de738487..b76aa668a94bce6c6d1280d5cbf307d6ce94e013 100644 (file)
@@ -953,7 +953,7 @@ const struct seq_operations nr_neigh_seqops = {
 /*
  *     Free all memory associated with the nodes and routes lists.
  */
-void __exit nr_rt_free(void)
+void nr_rt_free(void)
 {
        struct nr_neigh *s = NULL;
        struct nr_node  *t = NULL;
index ba1c368b3f186e140149a75e8d98dee24587a020..771011b84270e87854a8c47db1c0253640449fcc 100644 (file)
@@ -146,9 +146,12 @@ static struct ctl_table nr_table[] = {
        { }
 };
 
-void __init nr_register_sysctl(void)
+int __init nr_register_sysctl(void)
 {
        nr_table_header = register_net_sysctl(&init_net, "net/netrom", nr_table);
+       if (!nr_table_header)
+               return -ENOMEM;
+       return 0;
 }
 
 void nr_unregister_sysctl(void)
index ddfc52ac1f9b4391cb8b6e0f107658b1ee011565..c0d323b58e732318cc352be35bf940693b9bd028 100644 (file)
@@ -312,6 +312,10 @@ static void nci_hci_cmd_received(struct nci_dev *ndev, u8 pipe,
                create_info = (struct nci_hci_create_pipe_resp *)skb->data;
                dest_gate = create_info->dest_gate;
                new_pipe = create_info->pipe;
+               if (new_pipe >= NCI_HCI_MAX_PIPES) {
+                       status = NCI_HCI_ANY_E_NOK;
+                       goto exit;
+               }
 
                /* Save the new created pipe and bind with local gate,
                 * the description for skb->data[3] is destination gate id
@@ -336,6 +340,10 @@ static void nci_hci_cmd_received(struct nci_dev *ndev, u8 pipe,
                        goto exit;
                }
                delete_info = (struct nci_hci_delete_pipe_noti *)skb->data;
+               if (delete_info->pipe >= NCI_HCI_MAX_PIPES) {
+                       status = NCI_HCI_ANY_E_NOK;
+                       goto exit;
+               }
 
                ndev->hci_dev->pipes[delete_info->pipe].gate =
                                                NCI_HCI_INVALID_GATE;
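Both nci_hci_cmd_received() hunks add the same guard: a pipe identifier taken straight from a received packet must be range-checked before it indexes the pipes[] table, or a malicious peer can write outside the array. A minimal model of the check; MAX_PIPES and the entry layout are illustrative:

```c
/* Sketch of the pipe-index bounds check added above. */
#include <errno.h>
#include <stdio.h>

#define MAX_PIPES 128

struct pipe_entry { int gate; int host; };
static struct pipe_entry pipes[MAX_PIPES];

static int create_pipe(unsigned int new_pipe, int gate)
{
	if (new_pipe >= MAX_PIPES)	/* reject out-of-range wire data */
		return -EPROTO;

	pipes[new_pipe].gate = gate;
	return 0;
}

int main(void)
{
	printf("%d\n", create_pipe(3, 1));	/* 0: accepted */
	printf("%d\n", create_pipe(200, 1));	/* -EPROTO: rejected */
	return 0;
}
```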
index 121b01d4a3c0700f4427f192f28c103806cb2c81..6266299444500a83f31ec16101ef977da484f7a3 100644 (file)
@@ -29,7 +29,7 @@
 #include <net/netfilter/ipv6/nf_defrag_ipv6.h>
 #include <net/ipv6_frag.h>
 
-#ifdef CONFIG_NF_NAT_NEEDED
+#if IS_ENABLED(CONFIG_NF_NAT)
 #include <net/netfilter/nf_nat.h>
 #endif
 
@@ -75,7 +75,7 @@ struct ovs_conntrack_info {
        struct md_mark mark;
        struct md_labels labels;
        char timeout[CTNL_TIMEOUT_NAME_MAX];
-#ifdef CONFIG_NF_NAT_NEEDED
+#if IS_ENABLED(CONFIG_NF_NAT)
        struct nf_nat_range2 range;  /* Only present for SRC NAT and DST NAT. */
 #endif
 };
@@ -721,7 +721,7 @@ static bool skb_nfct_cached(struct net *net,
        return ct_executed;
 }
 
-#ifdef CONFIG_NF_NAT_NEEDED
+#if IS_ENABLED(CONFIG_NF_NAT)
 /* Modelled after nf_nat_ipv[46]_fn().
  * range is only used for new, uninitialized NAT state.
  * Returns either NF_ACCEPT or NF_DROP.
@@ -903,7 +903,7 @@ static int ovs_ct_nat(struct net *net, struct sw_flow_key *key,
 
        return err;
 }
-#else /* !CONFIG_NF_NAT_NEEDED */
+#else /* !CONFIG_NF_NAT */
 static int ovs_ct_nat(struct net *net, struct sw_flow_key *key,
                      const struct ovs_conntrack_info *info,
                      struct sk_buff *skb, struct nf_conn *ct,
@@ -1330,7 +1330,7 @@ static int ovs_ct_add_helper(struct ovs_conntrack_info *info, const char *name,
        return 0;
 }
 
-#ifdef CONFIG_NF_NAT_NEEDED
+#if IS_ENABLED(CONFIG_NF_NAT)
 static int parse_nat(const struct nlattr *attr,
                     struct ovs_conntrack_info *info, bool log)
 {
@@ -1467,7 +1467,7 @@ static const struct ovs_ct_len_tbl ovs_ct_attr_lens[OVS_CT_ATTR_MAX + 1] = {
                                    .maxlen = sizeof(struct md_labels) },
        [OVS_CT_ATTR_HELPER]    = { .minlen = 1,
                                    .maxlen = NF_CT_HELPER_NAME_LEN },
-#ifdef CONFIG_NF_NAT_NEEDED
+#if IS_ENABLED(CONFIG_NF_NAT)
        /* NAT length is checked when parsing the nested attributes. */
        [OVS_CT_ATTR_NAT]       = { .minlen = 0, .maxlen = INT_MAX },
 #endif
@@ -1547,7 +1547,7 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
                                return -EINVAL;
                        }
                        break;
-#ifdef CONFIG_NF_NAT_NEEDED
+#if IS_ENABLED(CONFIG_NF_NAT)
                case OVS_CT_ATTR_NAT: {
                        int err = parse_nat(a, info, log);
 
@@ -1677,7 +1677,7 @@ int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
        return err;
 }
 
-#ifdef CONFIG_NF_NAT_NEEDED
+#if IS_ENABLED(CONFIG_NF_NAT)
 static bool ovs_ct_nat_to_attr(const struct ovs_conntrack_info *info,
                               struct sk_buff *skb)
 {
@@ -1783,7 +1783,7 @@ int ovs_ct_action_to_attr(const struct ovs_conntrack_info *ct_info,
                        return -EMSGSIZE;
        }
 
-#ifdef CONFIG_NF_NAT_NEEDED
+#if IS_ENABLED(CONFIG_NF_NAT)
        if (ct_info->nat && !ovs_ct_nat_to_attr(ct_info, skb))
                return -EMSGSIZE;
 #endif
@@ -1804,9 +1804,9 @@ static void __ovs_ct_free_action(struct ovs_conntrack_info *ct_info)
        if (ct_info->helper)
                nf_conntrack_helper_put(ct_info->helper);
        if (ct_info->ct) {
-               nf_ct_tmpl_free(ct_info->ct);
                if (ct_info->timeout[0])
                        nf_ct_destroy_timeout(ct_info->ct);
+               nf_ct_tmpl_free(ct_info->ct);
        }
 }
 
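Besides the mechanical CONFIG_NF_NAT_NEEDED to IS_ENABLED(CONFIG_NF_NAT) conversion, the last openvswitch hunk fixes a use-after-free: nf_ct_destroy_timeout() dereferences the conntrack template, so it must run before nf_ct_tmpl_free(), not after. The shape of the fix in plain C; the struct and helpers are illustrative:

```c
/* Sketch of the release ordering fixed in __ovs_ct_free_action() above. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct conn { char timeout[16]; };

static void destroy_timeout(struct conn *ct)
{
	printf("destroying timeout %s\n", ct->timeout);
}

static void free_action(struct conn *ct)
{
	if (ct->timeout[0])
		destroy_timeout(ct);	/* uses ct: must happen first */
	free(ct);			/* only then release the memory */
}

int main(void)
{
	struct conn *ct = calloc(1, sizeof(*ct));

	if (!ct)
		return 1;
	strcpy(ct->timeout, "policy0");
	free_action(ct);
	return 0;
}
```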
index bd019058fc6fb80113f6716011558be5daacbb2e..3563acd5f92e0bb2361504494f886a282bd32380 100644 (file)
@@ -2331,14 +2331,14 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
 
        struct sw_flow_actions *acts;
        int new_acts_size;
-       int req_size = NLA_ALIGN(attr_len);
+       size_t req_size = NLA_ALIGN(attr_len);
        int next_offset = offsetof(struct sw_flow_actions, actions) +
                                        (*sfa)->actions_len;
 
        if (req_size <= (ksize(*sfa) - next_offset))
                goto out;
 
-       new_acts_size = ksize(*sfa) * 2;
+       new_acts_size = max(next_offset + req_size, ksize(*sfa) * 2);
 
        if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
                if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {
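The reserve_sfa_size() fix addresses two issues at once: req_size becomes size_t so a large attr_len cannot truncate or go negative, and the grown size is max(needed, 2 * current) so doubling alone can no longer come up short for a single oversized request. A userspace sketch of that growth policy using realloc(); the buffer struct is illustrative:

```c
/* Sketch of the grow-to-max(needed, 2 * current) policy above. */
#include <stdio.h>
#include <stdlib.h>

struct buf { size_t cap; size_t used; unsigned char *data; };

static int reserve(struct buf *b, size_t req_size)
{
	size_t need = b->used + req_size;
	size_t new_cap;
	unsigned char *p;

	if (req_size <= b->cap - b->used)
		return 0;		/* fits already */

	/* grow to max(needed, 2 * current) so large requests still fit */
	new_cap = b->cap * 2;
	if (new_cap < need)
		new_cap = need;

	p = realloc(b->data, new_cap);
	if (!p)
		return -1;
	b->data = p;
	b->cap = new_cap;
	return 0;
}

int main(void)
{
	struct buf b = { .cap = 8, .data = malloc(8) };

	if (!b.data || reserve(&b, 100))	/* > 2 * cap, still honored */
		return 1;
	printf("cap=%zu\n", b.cap);		/* prints cap=100 */
	free(b.data);
	return 0;
}
```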
index d6cc97fbbbb02458d958a8f493e37e6249db4db6..2b969f99ef1311f845baea874a985714cb051c7c 100644 (file)
@@ -543,6 +543,9 @@ static int rds_connect(struct socket *sock, struct sockaddr *uaddr,
        struct rds_sock *rs = rds_sk_to_rs(sk);
        int ret = 0;
 
+       if (addr_len < offsetofend(struct sockaddr, sa_family))
+               return -EINVAL;
+
        lock_sock(sk);
 
        switch (uaddr->sa_family) {
index 17c9d9f0c8483b4b0a887e69e7caac246c369423..0f4398e7f2a7add7c20b6fdd333c40af4e719c92 100644 (file)
@@ -173,6 +173,8 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
        /* We allow an RDS socket to be bound to either IPv4 or IPv6
         * address.
         */
+       if (addr_len < offsetofend(struct sockaddr, sa_family))
+               return -EINVAL;
        if (uaddr->sa_family == AF_INET) {
                struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
 
index fd2694174607405ab96f6f0dea10bc8dcc8caea9..faf726e00e27c75b11721dbc55518ca60bdf00a6 100644 (file)
@@ -608,7 +608,7 @@ static void rds_tcp_kill_sock(struct net *net)
        list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
                struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
 
-               if (net != c_net || !tc->t_sock)
+               if (net != c_net)
                        continue;
                if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) {
                        list_move_tail(&tc->t_tcp_node, &tmp_list);
index 96f2952bbdfd6e62ffcec87f0a565378abbfe4f5..ae8c5d7f3bf1e29460e5b96b05b7b1b1ecd4ce15 100644 (file)
@@ -135,7 +135,7 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
        struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *)saddr;
        struct rxrpc_local *local;
        struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
-       u16 service_id = srx->srx_service;
+       u16 service_id;
        int ret;
 
        _enter("%p,%p,%d", rx, saddr, len);
@@ -143,6 +143,7 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
        ret = rxrpc_validate_address(rx, srx, len);
        if (ret < 0)
                goto error;
+       service_id = srx->srx_service;
 
        lock_sock(&rx->sk);
 
@@ -370,18 +371,22 @@ EXPORT_SYMBOL(rxrpc_kernel_end_call);
  * rxrpc_kernel_check_life - Check to see whether a call is still alive
  * @sock: The socket the call is on
  * @call: The call to check
+ * @_life: Where to store the life value
  *
  * Allow a kernel service to find out whether a call is still alive - ie. we're
- * getting ACKs from the server.  Returns a number representing the life state
- * which can be compared to that returned by a previous call.
+ * getting ACKs from the server.  Passes back in *_life a number representing
+ * the life state, which can be compared to that returned by a previous call,
+ * and returns true if the call is still alive.
  *
  * If the life state stalls, rxrpc_kernel_probe_life() should be called and
  * then 2RTT waited.
  */
-u32 rxrpc_kernel_check_life(const struct socket *sock,
-                           const struct rxrpc_call *call)
+bool rxrpc_kernel_check_life(const struct socket *sock,
+                            const struct rxrpc_call *call,
+                            u32 *_life)
 {
-       return call->acks_latest;
+       *_life = call->acks_latest;
+       return call->state != RXRPC_CALL_COMPLETE;
 }
 EXPORT_SYMBOL(rxrpc_kernel_check_life);
 
index 4b1a534d290a79e3f035ee60766b4f2ebb2e35c2..062ca9dc29b8ab2fa7381c606791d4fd39657962 100644 (file)
@@ -654,6 +654,7 @@ struct rxrpc_call {
        u8                      ackr_reason;    /* reason to ACK */
        u16                     ackr_skew;      /* skew on packet being ACK'd */
        rxrpc_serial_t          ackr_serial;    /* serial of packet being ACK'd */
+       rxrpc_serial_t          ackr_first_seq; /* first sequence number received */
        rxrpc_seq_t             ackr_prev_seq;  /* previous sequence number received */
        rxrpc_seq_t             ackr_consumed;  /* Highest packet shown consumed */
        rxrpc_seq_t             ackr_seen;      /* Highest packet shown seen */
index b6fca8ebb1173f4de1047e96315c26072666c2e9..8d31fb4c51e17c1934face0f4320dfd219525a66 100644 (file)
@@ -153,7 +153,8 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
  * pass a connection-level abort onto all calls on that connection
  */
 static void rxrpc_abort_calls(struct rxrpc_connection *conn,
-                             enum rxrpc_call_completion compl)
+                             enum rxrpc_call_completion compl,
+                             rxrpc_serial_t serial)
 {
        struct rxrpc_call *call;
        int i;
@@ -173,6 +174,9 @@ static void rxrpc_abort_calls(struct rxrpc_connection *conn,
                                                  call->call_id, 0,
                                                  conn->abort_code,
                                                  conn->error);
+                       else
+                               trace_rxrpc_rx_abort(call, serial,
+                                                    conn->abort_code);
                        if (rxrpc_set_call_completion(call, compl,
                                                      conn->abort_code,
                                                      conn->error))
@@ -213,8 +217,6 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
        conn->state = RXRPC_CONN_LOCALLY_ABORTED;
        spin_unlock_bh(&conn->state_lock);
 
-       rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED);
-
        msg.msg_name    = &conn->params.peer->srx.transport;
        msg.msg_namelen = conn->params.peer->srx.transport_len;
        msg.msg_control = NULL;
@@ -242,6 +244,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
        len = iov[0].iov_len + iov[1].iov_len;
 
        serial = atomic_inc_return(&conn->serial);
+       rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED, serial);
        whdr.serial = htonl(serial);
        _proto("Tx CONN ABORT %%%u { %d }", serial, conn->abort_code);
 
@@ -321,7 +324,7 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
                conn->error = -ECONNABORTED;
                conn->abort_code = abort_code;
                conn->state = RXRPC_CONN_REMOTELY_ABORTED;
-               rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED);
+               rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED, sp->hdr.serial);
                return -ECONNABORTED;
 
        case RXRPC_PACKET_TYPE_CHALLENGE:
index 9128aa0e40aac8f51a84f10dc0bd0dd5933c1e23..4c6f9d0a00e79e1874f6ff6ceb6632a42c5072ff 100644 (file)
@@ -837,7 +837,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
                u8 acks[RXRPC_MAXACKS];
        } buf;
        rxrpc_serial_t acked_serial;
-       rxrpc_seq_t first_soft_ack, hard_ack;
+       rxrpc_seq_t first_soft_ack, hard_ack, prev_pkt;
        int nr_acks, offset, ioffset;
 
        _enter("");
@@ -851,13 +851,14 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
 
        acked_serial = ntohl(buf.ack.serial);
        first_soft_ack = ntohl(buf.ack.firstPacket);
+       prev_pkt = ntohl(buf.ack.previousPacket);
        hard_ack = first_soft_ack - 1;
        nr_acks = buf.ack.nAcks;
        summary.ack_reason = (buf.ack.reason < RXRPC_ACK__INVALID ?
                              buf.ack.reason : RXRPC_ACK__INVALID);
 
        trace_rxrpc_rx_ack(call, sp->hdr.serial, acked_serial,
-                          first_soft_ack, ntohl(buf.ack.previousPacket),
+                          first_soft_ack, prev_pkt,
                           summary.ack_reason, nr_acks);
 
        if (buf.ack.reason == RXRPC_ACK_PING_RESPONSE)
@@ -878,8 +879,9 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
                                  rxrpc_propose_ack_respond_to_ack);
        }
 
-       /* Discard any out-of-order or duplicate ACKs. */
-       if (before_eq(sp->hdr.serial, call->acks_latest))
+       /* Discard any out-of-order or duplicate ACKs (outside lock). */
+       if (before(first_soft_ack, call->ackr_first_seq) ||
+           before(prev_pkt, call->ackr_prev_seq))
                return;
 
        buf.info.rxMTU = 0;
@@ -890,12 +892,16 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
 
        spin_lock(&call->input_lock);
 
-       /* Discard any out-of-order or duplicate ACKs. */
-       if (before_eq(sp->hdr.serial, call->acks_latest))
+       /* Discard any out-of-order or duplicate ACKs (inside lock). */
+       if (before(first_soft_ack, call->ackr_first_seq) ||
+           before(prev_pkt, call->ackr_prev_seq))
                goto out;
        call->acks_latest_ts = skb->tstamp;
        call->acks_latest = sp->hdr.serial;
 
+       call->ackr_first_seq = first_soft_ack;
+       call->ackr_prev_seq = prev_pkt;
+
        /* Parse rwind and mtu sizes if provided. */
        if (buf.info.rxMTU)
                rxrpc_input_ackinfo(call, skb, &buf.info);
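The ACK-discard rework above keys duplicate detection on firstPacket/previousPacket instead of the serial number, and checks twice: once before taking input_lock as a cheap fast path, and again under the lock because the state may have advanced in between. A pthreads model of that double-checked discard; note that the kernel's before() also copes with sequence-number wrap, which the plain < below does not:

```c
/* Sketch of the outside-lock/inside-lock discard in rxrpc_input_ack(). */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int first_seq;	/* highest first-packet seq accepted */

static bool input_ack(unsigned int pkt_first_seq)
{
	if (pkt_first_seq < first_seq)	/* discard stale ACK (unlocked) */
		return false;

	pthread_mutex_lock(&lock);
	if (pkt_first_seq < first_seq) { /* recheck: state may have moved */
		pthread_mutex_unlock(&lock);
		return false;
	}
	first_seq = pkt_first_seq;	/* accept and advance */
	pthread_mutex_unlock(&lock);
	return true;
}

int main(void)
{
	printf("%d\n", input_ack(5));	/* 1: accepted */
	printf("%d\n", input_ack(3));	/* 0: stale, discarded */
	return 0;
}
```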
index 15cf42d5b53a56d8d19cabdc8c2b55156d73d28a..9157fd00dce34b5291518f5311e699b6e481b345 100644 (file)
@@ -180,7 +180,7 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
                /* Fall through and set IPv4 options too otherwise we don't get
                 * errors from IPv4 packets sent through the IPv6 socket.
                 */
-
+               /* Fall through */
        case AF_INET:
                /* we want to receive ICMP errors */
                opt = 1;
index bc05af89fc381daa46d7cf8032c9900dfbcea65c..6e84d878053c7b8821483c0c1447a5c338d5fade 100644 (file)
@@ -157,6 +157,11 @@ void rxrpc_error_report(struct sock *sk)
 
        _enter("%p{%d}", sk, local->debug_id);
 
+       /* Clear the outstanding error value on the socket so that it doesn't
+        * cause kernel_sendmsg() to return it later.
+        */
+       sock_error(sk);
+
        skb = sock_dequeue_err_skb(sk);
        if (!skb) {
                _leave("UDP socket errqueue empty");
index 46c9312085b1ba81b4941607f751a07adb8f3c20..bec64deb7b0a2794345c896827846fa8bac57e19 100644 (file)
@@ -152,12 +152,13 @@ static void rxrpc_notify_end_tx(struct rxrpc_sock *rx, struct rxrpc_call *call,
 }
 
 /*
- * Queue a DATA packet for transmission, set the resend timeout and send the
- * packet immediately
+ * Queue a DATA packet for transmission, set the resend timeout and send
+ * the packet immediately.  Returns the error from rxrpc_send_data_packet()
+ * in case the caller wants to do something with it.
  */
-static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
-                              struct sk_buff *skb, bool last,
-                              rxrpc_notify_end_tx_t notify_end_tx)
+static int rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
+                             struct sk_buff *skb, bool last,
+                             rxrpc_notify_end_tx_t notify_end_tx)
 {
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        unsigned long now;
@@ -250,7 +251,8 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
 
 out:
        rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
-       _leave("");
+       _leave(" = %d", ret);
+       return ret;
 }
 
 /*
@@ -423,9 +425,10 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
                        if (ret < 0)
                                goto out;
 
-                       rxrpc_queue_packet(rx, call, skb,
-                                          !msg_data_left(msg) && !more,
-                                          notify_end_tx);
+                       ret = rxrpc_queue_packet(rx, call, skb,
+                                                !msg_data_left(msg) && !more,
+                                                notify_end_tx);
+                       /* Should check for failure here */
                        skb = NULL;
                }
        } while (msg_data_left(msg) > 0);
index 4060b0955c97db68872a88d6bd05d5143fdc2e7c..0f82d50ea23245be1ce34fcce1cdb4a048c1af17 100644 (file)
@@ -45,8 +45,8 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
        struct nlattr *tb[TCA_SAMPLE_MAX + 1];
        struct psample_group *psample_group;
        struct tcf_chain *goto_ch = NULL;
+       u32 psample_group_num, rate;
        struct tc_sample *parm;
-       u32 psample_group_num;
        struct tcf_sample *s;
        bool exists = false;
        int ret, err;
@@ -85,6 +85,12 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
        if (err < 0)
                goto release_idr;
 
+       rate = nla_get_u32(tb[TCA_SAMPLE_RATE]);
+       if (!rate) {
+               NL_SET_ERR_MSG(extack, "invalid sample rate");
+               err = -EINVAL;
+               goto put_chain;
+       }
        psample_group_num = nla_get_u32(tb[TCA_SAMPLE_PSAMPLE_GROUP]);
        psample_group = psample_group_get(net, psample_group_num);
        if (!psample_group) {
@@ -96,7 +102,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
 
        spin_lock_bh(&s->tcf_lock);
        goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
-       s->rate = nla_get_u32(tb[TCA_SAMPLE_RATE]);
+       s->rate = rate;
        s->psample_group_num = psample_group_num;
        RCU_INIT_POINTER(s->psample_group, psample_group);
 
index 99ae30c177c76783dae71bf7955f4d4d0bb3b639..9115f053883f375e6f38d73530a7b88d0b2bd96c 100644 (file)
@@ -3229,7 +3229,6 @@ int tc_setup_flow_action(struct flow_action *flow_action,
                        entry->tunnel = tcf_tunnel_info(act);
                } else if (is_tcf_tunnel_release(act)) {
                        entry->id = FLOW_ACTION_TUNNEL_DECAP;
-                       entry->tunnel = tcf_tunnel_info(act);
                } else if (is_tcf_pedit(act)) {
                        for (k = 0; k < tcf_pedit_nkeys(act); k++) {
                                switch (tcf_pedit_cmd(act, k)) {
index 0638f17ac5ab761a2ff80ddf9f1ebb08aecccd0c..4b5585358699ea1776e43d4f6d9a832fcf01a78b 100644 (file)
@@ -336,8 +336,7 @@ static void fl_mask_free_work(struct work_struct *work)
        fl_mask_free(mask);
 }
 
-static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask,
-                       bool async)
+static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
 {
        if (!refcount_dec_and_test(&mask->refcnt))
                return false;
@@ -348,10 +347,7 @@ static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask,
        list_del_rcu(&mask->list);
        spin_unlock(&head->masks_lock);
 
-       if (async)
-               tcf_queue_work(&mask->rwork, fl_mask_free_work);
-       else
-               fl_mask_free(mask);
+       tcf_queue_work(&mask->rwork, fl_mask_free_work);
 
        return true;
 }
@@ -538,7 +534,6 @@ static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
                       struct netlink_ext_ack *extack)
 {
        struct cls_fl_head *head = fl_head_dereference(tp);
-       bool async = tcf_exts_get_net(&f->exts);
 
        *last = false;
 
@@ -555,7 +550,7 @@ static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
        list_del_rcu(&f->list);
        spin_unlock(&tp->lock);
 
-       *last = fl_mask_put(head, f->mask, async);
+       *last = fl_mask_put(head, f->mask);
        if (!tc_skip_hw(f->flags))
                fl_hw_destroy_filter(tp, f, rtnl_held, extack);
        tcf_unbind_filter(tp, &f->res);
@@ -1459,6 +1454,28 @@ static int fl_set_parms(struct net *net, struct tcf_proto *tp,
        return 0;
 }
 
+static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
+                              struct cls_fl_filter *fold,
+                              bool *in_ht)
+{
+       struct fl_flow_mask *mask = fnew->mask;
+       int err;
+
+       err = rhashtable_lookup_insert_fast(&mask->ht,
+                                           &fnew->ht_node,
+                                           mask->filter_ht_params);
+       if (err) {
+               *in_ht = false;
+               /* It is okay if a filter with the same key exists
+                * when overwriting.
+                */
+               return fold && err == -EEXIST ? 0 : err;
+       }
+
+       *in_ht = true;
+       return 0;
+}
+
 static int fl_change(struct net *net, struct sk_buff *in_skb,
                     struct tcf_proto *tp, unsigned long base,
                     u32 handle, struct nlattr **tca,
@@ -1470,6 +1487,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
        struct cls_fl_filter *fnew;
        struct fl_flow_mask *mask;
        struct nlattr **tb;
+       bool in_ht;
        int err;
 
        if (!tca[TCA_OPTIONS]) {
@@ -1528,10 +1546,14 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
        if (err)
                goto errout;
 
+       err = fl_ht_insert_unique(fnew, fold, &in_ht);
+       if (err)
+               goto errout_mask;
+
        if (!tc_skip_hw(fnew->flags)) {
                err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
                if (err)
-                       goto errout_mask;
+                       goto errout_ht;
        }
 
        if (!tc_in_hw(fnew->flags))
@@ -1557,10 +1579,17 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 
                fnew->handle = handle;
 
-               err = rhashtable_insert_fast(&fnew->mask->ht, &fnew->ht_node,
-                                            fnew->mask->filter_ht_params);
-               if (err)
-                       goto errout_hw;
+               if (!in_ht) {
+                       struct rhashtable_params params =
+                               fnew->mask->filter_ht_params;
+
+                       err = rhashtable_insert_fast(&fnew->mask->ht,
+                                                    &fnew->ht_node,
+                                                    params);
+                       if (err)
+                               goto errout_hw;
+                       in_ht = true;
+               }
 
                rhashtable_remove_fast(&fold->mask->ht,
                                       &fold->ht_node,
@@ -1571,22 +1600,16 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 
                spin_unlock(&tp->lock);
 
-               fl_mask_put(head, fold->mask, true);
+               fl_mask_put(head, fold->mask);
                if (!tc_skip_hw(fold->flags))
                        fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
                tcf_unbind_filter(tp, &fold->res);
-               tcf_exts_get_net(&fold->exts);
                /* Caller holds reference to fold, so refcnt is always > 0
                 * after this.
                 */
                refcount_dec(&fold->refcnt);
                __fl_put(fold);
        } else {
-               if (__fl_lookup(fnew->mask, &fnew->mkey)) {
-                       err = -EEXIST;
-                       goto errout_hw;
-               }
-
                if (handle) {
                        /* user specifies a handle and it doesn't exist */
                        err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
@@ -1609,12 +1632,6 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
                        goto errout_hw;
 
                fnew->handle = handle;
-
-               err = rhashtable_insert_fast(&fnew->mask->ht, &fnew->ht_node,
-                                            fnew->mask->filter_ht_params);
-               if (err)
-                       goto errout_idr;
-
                list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
                spin_unlock(&tp->lock);
        }
@@ -1625,17 +1642,19 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
        kfree(mask);
        return 0;
 
-errout_idr:
-       idr_remove(&head->handle_idr, fnew->handle);
 errout_hw:
        spin_unlock(&tp->lock);
        if (!tc_skip_hw(fnew->flags))
                fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
+errout_ht:
+       if (in_ht)
+               rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
+                                      fnew->mask->filter_ht_params);
 errout_mask:
-       fl_mask_put(head, fnew->mask, true);
+       fl_mask_put(head, fnew->mask);
 errout:
-       tcf_exts_destroy(&fnew->exts);
-       kfree(fnew);
+       tcf_exts_get_net(&fnew->exts);
+       tcf_queue_work(&fnew->rwork, fl_destroy_filter_work);
 errout_tb:
        kfree(tb);
 errout_mask_alloc:
@@ -1683,59 +1702,63 @@ static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
 static int fl_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
                        void *cb_priv, struct netlink_ext_ack *extack)
 {
-       struct cls_fl_head *head = fl_head_dereference(tp);
        struct tc_cls_flower_offload cls_flower = {};
        struct tcf_block *block = tp->chain->block;
-       struct fl_flow_mask *mask;
+       unsigned long handle = 0;
        struct cls_fl_filter *f;
        int err;
 
-       list_for_each_entry(mask, &head->masks, list) {
-               list_for_each_entry(f, &mask->filters, list) {
-                       if (tc_skip_hw(f->flags))
-                               continue;
-
-                       cls_flower.rule =
-                               flow_rule_alloc(tcf_exts_num_actions(&f->exts));
-                       if (!cls_flower.rule)
-                               return -ENOMEM;
-
-                       tc_cls_common_offload_init(&cls_flower.common, tp,
-                                                  f->flags, extack);
-                       cls_flower.command = add ?
-                               TC_CLSFLOWER_REPLACE : TC_CLSFLOWER_DESTROY;
-                       cls_flower.cookie = (unsigned long)f;
-                       cls_flower.rule->match.dissector = &mask->dissector;
-                       cls_flower.rule->match.mask = &mask->key;
-                       cls_flower.rule->match.key = &f->mkey;
-
-                       err = tc_setup_flow_action(&cls_flower.rule->action,
-                                                  &f->exts);
-                       if (err) {
-                               kfree(cls_flower.rule);
-                               if (tc_skip_sw(f->flags)) {
-                                       NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
-                                       return err;
-                               }
-                               continue;
-                       }
+       while ((f = fl_get_next_filter(tp, &handle))) {
+               if (tc_skip_hw(f->flags))
+                       goto next_flow;
 
-                       cls_flower.classid = f->res.classid;
+               cls_flower.rule =
+                       flow_rule_alloc(tcf_exts_num_actions(&f->exts));
+               if (!cls_flower.rule) {
+                       __fl_put(f);
+                       return -ENOMEM;
+               }
 
-                       err = cb(TC_SETUP_CLSFLOWER, &cls_flower, cb_priv);
+               tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
+                                          extack);
+               cls_flower.command = add ?
+                       TC_CLSFLOWER_REPLACE : TC_CLSFLOWER_DESTROY;
+               cls_flower.cookie = (unsigned long)f;
+               cls_flower.rule->match.dissector = &f->mask->dissector;
+               cls_flower.rule->match.mask = &f->mask->key;
+               cls_flower.rule->match.key = &f->mkey;
+
+               err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
+               if (err) {
                        kfree(cls_flower.rule);
-
-                       if (err) {
-                               if (add && tc_skip_sw(f->flags))
-                                       return err;
-                               continue;
+                       if (tc_skip_sw(f->flags)) {
+                               NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
+                               __fl_put(f);
+                               return err;
                        }
+                       goto next_flow;
+               }
+
+               cls_flower.classid = f->res.classid;
 
-                       spin_lock(&tp->lock);
-                       tc_cls_offload_cnt_update(block, &f->in_hw_count,
-                                                 &f->flags, add);
-                       spin_unlock(&tp->lock);
+               err = cb(TC_SETUP_CLSFLOWER, &cls_flower, cb_priv);
+               kfree(cls_flower.rule);
+
+               if (err) {
+                       if (add && tc_skip_sw(f->flags)) {
+                               __fl_put(f);
+                               return err;
+                       }
+                       goto next_flow;
                }
+
+               spin_lock(&tp->lock);
+               tc_cls_offload_cnt_update(block, &f->in_hw_count, &f->flags,
+                                         add);
+               spin_unlock(&tp->lock);
+next_flow:
+               handle++;
+               __fl_put(f);
        }
 
        return 0;
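The fl_reoffload() rewrite stops walking the mask lists directly and instead iterates by handle, taking a reference on each filter and dropping it on every exit path, which is what makes the walk safe against concurrent deletion. A toy model of the get/advance/put loop, with get_next()/put() as stand-ins for fl_get_next_filter()/__fl_put():

```c
/* Sketch of the referenced cursor walk adopted by fl_reoffload() above. */
#include <stdio.h>

struct filter { unsigned long handle; int refcnt; };

static struct filter table[] = { {1, 1}, {2, 1}, {5, 1} };
#define N (sizeof(table) / sizeof(table[0]))

static struct filter *get_next(unsigned long *handle)
{
	for (size_t i = 0; i < N; i++) {
		if (table[i].handle >= *handle) {
			*handle = table[i].handle;
			table[i].refcnt++;	/* caller now holds a ref */
			return &table[i];
		}
	}
	return NULL;
}

static void put(struct filter *f)
{
	f->refcnt--;
}

int main(void)
{
	unsigned long handle = 0;
	struct filter *f;

	while ((f = get_next(&handle))) {
		printf("offloading filter %lu\n", f->handle);
		handle++;	/* advance past this filter */
		put(f);		/* drop the ref on every path */
	}
	return 0;
}
```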
index 459921bd3d87b5a563d3725015f8765b20566aa0..a13bc351a4148f40f434b25a9adffc4cf9137548 100644 (file)
@@ -130,6 +130,11 @@ static void mall_destroy(struct tcf_proto *tp, bool rtnl_held,
 
 static void *mall_get(struct tcf_proto *tp, u32 handle)
 {
+       struct cls_mall_head *head = rtnl_dereference(tp->root);
+
+       if (head && head->handle == handle)
+               return head;
+
        return NULL;
 }
 
index fb8f138b97763bdf917b202ba1b93ad773d0407f..c126b9f78d6e31760342d2b40398c048336628c1 100644 (file)
@@ -998,6 +998,19 @@ static void notify_and_destroy(struct net *net, struct sk_buff *skb,
                qdisc_put(old);
 }
 
+static void qdisc_clear_nolock(struct Qdisc *sch)
+{
+       sch->flags &= ~TCQ_F_NOLOCK;
+       if (!(sch->flags & TCQ_F_CPUSTATS))
+               return;
+
+       free_percpu(sch->cpu_bstats);
+       free_percpu(sch->cpu_qstats);
+       sch->cpu_bstats = NULL;
+       sch->cpu_qstats = NULL;
+       sch->flags &= ~TCQ_F_CPUSTATS;
+}
+
 /* Graft qdisc "new" to class "classid" of qdisc "parent" or
  * to device "dev".
  *
@@ -1076,7 +1089,7 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
                /* Only support running class lockless if parent is lockless */
                if (new && (new->flags & TCQ_F_NOLOCK) &&
                    parent && !(parent->flags & TCQ_F_NOLOCK))
-                       new->flags &= ~TCQ_F_NOLOCK;
+                       qdisc_clear_nolock(new);
 
                if (!cops || !cops->graft)
                        return -EOPNOTSUPP;
index acc9b9da985f81ffd9b485e082cf1781e6731ba2..259d97bc2abd39df8df646c2ebc34ea272e1fd70 100644 (file)
@@ -1517,16 +1517,27 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
 
 static u8 cake_handle_diffserv(struct sk_buff *skb, u16 wash)
 {
+       int wlen = skb_network_offset(skb);
        u8 dscp;
 
-       switch (skb->protocol) {
+       switch (tc_skb_protocol(skb)) {
        case htons(ETH_P_IP):
+               wlen += sizeof(struct iphdr);
+               if (!pskb_may_pull(skb, wlen) ||
+                   skb_try_make_writable(skb, wlen))
+                       return 0;
+
                dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
                if (wash && dscp)
                        ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
                return dscp;
 
        case htons(ETH_P_IPV6):
+               wlen += sizeof(struct ipv6hdr);
+               if (!pskb_may_pull(skb, wlen) ||
+                   skb_try_make_writable(skb, wlen))
+                       return 0;
+
                dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
                if (wash && dscp)
                        ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
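cake_handle_diffserv() previously read (and, when washing, rewrote) the IP header without confirming those bytes were present and writable; the fix pulls exactly the header length first and falls back to DSCP 0 on truncated packets. A flat-buffer model of the pull-before-read rule; it covers only the read side, not the skb_try_make_writable() half:

```c
/* Sketch of the pskb_may_pull() guard added to cake_handle_diffserv(). */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct pkt { const unsigned char *data; size_t len; size_t net_off; };

static bool may_pull(const struct pkt *p, size_t wlen)
{
	return wlen <= p->len;	/* header bytes are all in the buffer */
}

static int read_dscp(const struct pkt *p, size_t iphdr_len)
{
	size_t wlen = p->net_off + iphdr_len;

	if (!may_pull(p, wlen))
		return 0;	/* truncated packet: report no DSCP */

	/* DSCP is the top six bits of the IPv4 header's second byte */
	return p->data[p->net_off + 1] >> 2;
}

int main(void)
{
	unsigned char raw[20] = { 0x45, 0xb8 };	/* version/IHL, DSCP 46 */
	struct pkt p = { raw, sizeof(raw), 0 };

	printf("dscp=%d\n", read_dscp(&p, 20));	/* dscp=46 */
	p.len = 10;
	printf("dscp=%d\n", read_dscp(&p, 20));	/* dscp=0 (short) */
	return 0;
}
```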
index 4dc05409e3fb2742c1af9467aae5d1bf221b7101..114b9048ea7e3682106c6e65644d4d0992e20461 100644 (file)
@@ -1358,9 +1358,11 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 {
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct cbq_class *cl = (struct cbq_class *)arg;
+       __u32 qlen;
 
        cl->xstats.avgidle = cl->avgidle;
        cl->xstats.undertime = 0;
+       qdisc_qstats_qlen_backlog(cl->q, &qlen, &cl->qstats.backlog);
 
        if (cl->undertime != PSCHED_PASTPERFECT)
                cl->xstats.undertime = cl->undertime - q->now;
@@ -1368,7 +1370,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
        if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
                                  d, NULL, &cl->bstats) < 0 ||
            gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
-           gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->q->q.qlen) < 0)
+           gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)
                return -1;
 
        return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
@@ -1665,17 +1667,13 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
 {
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct cbq_class *cl = (struct cbq_class *)arg;
-       unsigned int qlen, backlog;
 
        if (cl->filters || cl->children || cl == &q->link)
                return -EBUSY;
 
        sch_tree_lock(sch);
 
-       qlen = cl->q->q.qlen;
-       backlog = cl->q->qstats.backlog;
-       qdisc_reset(cl->q);
-       qdisc_tree_reduce_backlog(cl->q, qlen, backlog);
+       qdisc_purge_queue(cl->q);
 
        if (cl->next_alive)
                cbq_deactivate_class(cl);
index c6a502933fe78b250b971416caeffd796d3f31d3..f68fd7a0e038b527bb91a1bc7717b8fe09048c7c 100644 (file)
 #include <linux/string.h>
 #include <linux/errno.h>
 #include <linux/skbuff.h>
+#include <net/netevent.h>
 #include <net/netlink.h>
 #include <net/sch_generic.h>
 #include <net/pkt_sched.h>
 
+static LIST_HEAD(cbs_list);
+static DEFINE_SPINLOCK(cbs_list_lock);
+
 #define BYTES_PER_KBIT (1000LL / 8)
 
 struct cbs_sched_data {
        bool offload;
        int queue;
-       s64 port_rate; /* in bytes/s */
+       atomic64_t port_rate; /* in bytes/s */
        s64 last; /* timestamp in ns */
        s64 credits; /* in bytes */
        s32 locredit; /* in bytes */
@@ -82,6 +86,7 @@ struct cbs_sched_data {
                       struct sk_buff **to_free);
        struct sk_buff *(*dequeue)(struct Qdisc *sch);
        struct Qdisc *qdisc;
+       struct list_head cbs_list;
 };
 
 static int cbs_child_enqueue(struct sk_buff *skb, struct Qdisc *sch,
@@ -181,6 +186,11 @@ static struct sk_buff *cbs_dequeue_soft(struct Qdisc *sch)
        s64 credits;
        int len;
 
+       if (atomic64_read(&q->port_rate) == -1) {
+               WARN_ONCE(1, "cbs: dequeue() called with unknown port rate.");
+               return NULL;
+       }
+
        if (q->credits < 0) {
                credits = timediff_to_credits(now - q->last, q->idleslope);
 
@@ -207,7 +217,8 @@ static struct sk_buff *cbs_dequeue_soft(struct Qdisc *sch)
        /* As sendslope is a negative number, this will decrease the
         * amount of q->credits.
         */
-       credits = credits_from_len(len, q->sendslope, q->port_rate);
+       credits = credits_from_len(len, q->sendslope,
+                                  atomic64_read(&q->port_rate));
        credits += q->credits;
 
        q->credits = max_t(s64, credits, q->locredit);
@@ -294,6 +305,50 @@ static int cbs_enable_offload(struct net_device *dev, struct cbs_sched_data *q,
        return 0;
 }
 
+static void cbs_set_port_rate(struct net_device *dev, struct cbs_sched_data *q)
+{
+       struct ethtool_link_ksettings ecmd;
+       int port_rate = -1;
+
+       if (!__ethtool_get_link_ksettings(dev, &ecmd) &&
+           ecmd.base.speed != SPEED_UNKNOWN)
+               port_rate = ecmd.base.speed * 1000 * BYTES_PER_KBIT;
+
+       atomic64_set(&q->port_rate, port_rate);
+       netdev_dbg(dev, "cbs: set %s's port_rate to: %lld, linkspeed: %d\n",
+                  dev->name, (long long)atomic64_read(&q->port_rate),
+                  ecmd.base.speed);
+}
+
+static int cbs_dev_notifier(struct notifier_block *nb, unsigned long event,
+                           void *ptr)
+{
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+       struct cbs_sched_data *q;
+       struct net_device *qdev;
+       bool found = false;
+
+       ASSERT_RTNL();
+
+       if (event != NETDEV_UP && event != NETDEV_CHANGE)
+               return NOTIFY_DONE;
+
+       spin_lock(&cbs_list_lock);
+       list_for_each_entry(q, &cbs_list, cbs_list) {
+               qdev = qdisc_dev(q->qdisc);
+               if (qdev == dev) {
+                       found = true;
+                       break;
+               }
+       }
+       spin_unlock(&cbs_list_lock);
+
+       if (found)
+               cbs_set_port_rate(dev, q);
+
+       return NOTIFY_DONE;
+}
+
 static int cbs_change(struct Qdisc *sch, struct nlattr *opt,
                      struct netlink_ext_ack *extack)
 {
@@ -315,16 +370,7 @@ static int cbs_change(struct Qdisc *sch, struct nlattr *opt,
        qopt = nla_data(tb[TCA_CBS_PARMS]);
 
        if (!qopt->offload) {
-               struct ethtool_link_ksettings ecmd;
-               s64 link_speed;
-
-               if (!__ethtool_get_link_ksettings(dev, &ecmd))
-                       link_speed = ecmd.base.speed;
-               else
-                       link_speed = SPEED_1000;
-
-               q->port_rate = link_speed * 1000 * BYTES_PER_KBIT;
-
+               cbs_set_port_rate(dev, q);
                cbs_disable_offload(dev, q);
        } else {
                err = cbs_enable_offload(dev, q, qopt, extack);
@@ -347,6 +393,7 @@ static int cbs_init(struct Qdisc *sch, struct nlattr *opt,
 {
        struct cbs_sched_data *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
+       int err;
 
        if (!opt) {
                NL_SET_ERR_MSG(extack, "Missing CBS qdisc options which are mandatory");
@@ -367,7 +414,17 @@ static int cbs_init(struct Qdisc *sch, struct nlattr *opt,
 
        qdisc_watchdog_init(&q->watchdog, sch);
 
-       return cbs_change(sch, opt, extack);
+       err = cbs_change(sch, opt, extack);
+       if (err)
+               return err;
+
+       if (!q->offload) {
+               spin_lock(&cbs_list_lock);
+               list_add(&q->cbs_list, &cbs_list);
+               spin_unlock(&cbs_list_lock);
+       }
+
+       return 0;
 }
 
 static void cbs_destroy(struct Qdisc *sch)
@@ -375,8 +432,11 @@ static void cbs_destroy(struct Qdisc *sch)
        struct cbs_sched_data *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
 
-       qdisc_watchdog_cancel(&q->watchdog);
+       spin_lock(&cbs_list_lock);
+       list_del(&q->cbs_list);
+       spin_unlock(&cbs_list_lock);
 
+       qdisc_watchdog_cancel(&q->watchdog);
        cbs_disable_offload(dev, q);
 
        if (q->qdisc)
@@ -487,14 +547,24 @@ static struct Qdisc_ops cbs_qdisc_ops __read_mostly = {
        .owner          =       THIS_MODULE,
 };
 
+static struct notifier_block cbs_device_notifier = {
+       .notifier_call = cbs_dev_notifier,
+};
+
 static int __init cbs_module_init(void)
 {
+       int err = register_netdevice_notifier(&cbs_device_notifier);
+
+       if (err)
+               return err;
+
        return register_qdisc(&cbs_qdisc_ops);
 }
 
 static void __exit cbs_module_exit(void)
 {
        unregister_qdisc(&cbs_qdisc_ops);
+       unregister_netdevice_notifier(&cbs_device_notifier);
 }
 module_init(cbs_module_init)
 module_exit(cbs_module_exit)
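
The cbs hunks above move port_rate from a plain s64, computed once at change() time with a SPEED_1000 fallback, into an atomic64_t that a netdevice notifier refreshes on NETDEV_UP/NETDEV_CHANGE: the notifier runs under RTNL, enqueue/dequeue read the rate locklessly, and an unknown link speed is now stored as -1 instead of being guessed. A minimal sketch of the resulting split (the two function names here are illustrative, not from the patch; BYTES_PER_KBIT is sch_cbs.c's 1000LL / 8, i.e. 125 bytes per kilobit):

    /* control path, RTNL held (notifier or cbs_change) */
    static void sketch_refresh_rate(struct cbs_sched_data *q, u32 speed_mbps)
    {
            /* 1000 Mbps -> 1000 * 1000 * 125 = 125,000,000 bytes/s */
            atomic64_set(&q->port_rate, (s64)speed_mbps * 1000 * BYTES_PER_KBIT);
    }

    /* datapath, no RTNL: atomic64_read() is tear-free even on 32-bit */
    static s64 sketch_read_rate(struct cbs_sched_data *q)
    {
            return atomic64_read(&q->port_rate);
    }
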
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index 09b8009910657ace91e838eccfa520c81d800750..430df9a55ec4e9742786fb869ab3acf28e84f5ed 100644
@@ -50,15 +50,6 @@ static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
        return container_of(clc, struct drr_class, common);
 }
 
-static void drr_purge_queue(struct drr_class *cl)
-{
-       unsigned int len = cl->qdisc->q.qlen;
-       unsigned int backlog = cl->qdisc->qstats.backlog;
-
-       qdisc_reset(cl->qdisc);
-       qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
-}
-
 static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
        [TCA_DRR_QUANTUM]       = { .type = NLA_U32 },
 };
@@ -167,7 +158,7 @@ static int drr_delete_class(struct Qdisc *sch, unsigned long arg)
 
        sch_tree_lock(sch);
 
-       drr_purge_queue(cl);
+       qdisc_purge_queue(cl->qdisc);
        qdisc_class_hash_remove(&q->clhash, &cl->common);
 
        sch_tree_unlock(sch);
@@ -269,7 +260,8 @@ static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
                                struct gnet_dump *d)
 {
        struct drr_class *cl = (struct drr_class *)arg;
-       __u32 qlen = cl->qdisc->q.qlen;
+       __u32 qlen = qdisc_qlen_sum(cl->qdisc);
+       struct Qdisc *cl_q = cl->qdisc;
        struct tc_drr_stats xstats;
 
        memset(&xstats, 0, sizeof(xstats));
@@ -279,7 +271,7 @@ static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
        if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
                                  d, NULL, &cl->bstats) < 0 ||
            gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
-           gnet_stats_copy_queue(d, NULL, &cl->qdisc->qstats, qlen) < 0)
+           gnet_stats_copy_queue(d, cl_q->cpu_qstats, &cl_q->qstats, qlen) < 0)
                return -1;
 
        return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
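
These drr hunks, and the matching hunks in the qdiscs that follow (hfsc, htb, mq, mqprio, multiq, prio, qfq, red, sfb, tbf), replace per-qdisc purge boilerplate with shared helpers added elsewhere in this merge to include/net/sch_generic.h. Their shape is roughly the following sketch; the point is that qdisc_qlen_sum() and __gnet_stats_copy_queue() fold in the per-CPU counters, so qlen/backlog stay correct when the child is a NOLOCK qdisc using per-CPU stats:

    static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch, __u32 *qlen,
                                                 __u32 *backlog)
    {
            struct gnet_stats_queue qstats = { 0 };
            __u32 len = qdisc_qlen_sum(sch);

            __gnet_stats_copy_queue(&qstats, sch->cpu_qstats, &sch->qstats, len);
            *qlen = qstats.qlen;
            *backlog = qstats.backlog;
    }

    /* purge: reset the child, then credit its old qlen/backlog upwards */
    static inline void qdisc_purge_queue(struct Qdisc *sch)
    {
            __u32 qlen, backlog;

            qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
            qdisc_reset(sch);
            qdisc_tree_reduce_backlog(sch, qlen, backlog);
    }

    /* flush: credit upwards only; the caller is about to drop the child */
    static inline void qdisc_tree_flush_backlog(struct Qdisc *sch)
    {
            __u32 qlen, backlog;

            qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
            qdisc_tree_reduce_backlog(sch, qlen, backlog);
    }

    /* dump: copy queue stats from whichever representation is active */
    static inline int qdisc_qstats_copy(struct gnet_dump *d, struct Qdisc *sch)
    {
            __u32 qlen = qdisc_qlen_sum(sch);

            return gnet_stats_copy_queue(d, sch->cpu_qstats, &sch->qstats, qlen);
    }
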
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 81356ef38d1de560ad8ce54b2cf2fe83fe890cd5..848aab3693bd075613555515202a1203da476a80 100644
@@ -68,7 +68,7 @@ static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
                        skb = __skb_dequeue(&q->skb_bad_txq);
                        if (qdisc_is_percpu_stats(q)) {
                                qdisc_qstats_cpu_backlog_dec(q, skb);
-                               qdisc_qstats_atomic_qlen_dec(q);
+                               qdisc_qstats_cpu_qlen_dec(q);
                        } else {
                                qdisc_qstats_backlog_dec(q, skb);
                                q->q.qlen--;
@@ -108,7 +108,7 @@ static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
 
        if (qdisc_is_percpu_stats(q)) {
                qdisc_qstats_cpu_backlog_inc(q, skb);
-               qdisc_qstats_atomic_qlen_inc(q);
+               qdisc_qstats_cpu_qlen_inc(q);
        } else {
                qdisc_qstats_backlog_inc(q, skb);
                q->q.qlen++;
@@ -118,52 +118,36 @@ static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
                spin_unlock(lock);
 }
 
-static inline int __dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
+static inline void dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
 {
-       while (skb) {
-               struct sk_buff *next = skb->next;
-
-               __skb_queue_tail(&q->gso_skb, skb);
-               q->qstats.requeues++;
-               qdisc_qstats_backlog_inc(q, skb);
-               q->q.qlen++;    /* it's still part of the queue */
+       spinlock_t *lock = NULL;
 
-               skb = next;
+       if (q->flags & TCQ_F_NOLOCK) {
+               lock = qdisc_lock(q);
+               spin_lock(lock);
        }
-       __netif_schedule(q);
-
-       return 0;
-}
 
-static inline int dev_requeue_skb_locked(struct sk_buff *skb, struct Qdisc *q)
-{
-       spinlock_t *lock = qdisc_lock(q);
-
-       spin_lock(lock);
        while (skb) {
                struct sk_buff *next = skb->next;
 
                __skb_queue_tail(&q->gso_skb, skb);
 
-               qdisc_qstats_cpu_requeues_inc(q);
-               qdisc_qstats_cpu_backlog_inc(q, skb);
-               qdisc_qstats_atomic_qlen_inc(q);
+               /* it's still part of the queue */
+               if (qdisc_is_percpu_stats(q)) {
+                       qdisc_qstats_cpu_requeues_inc(q);
+                       qdisc_qstats_cpu_backlog_inc(q, skb);
+                       qdisc_qstats_cpu_qlen_inc(q);
+               } else {
+                       q->qstats.requeues++;
+                       qdisc_qstats_backlog_inc(q, skb);
+                       q->q.qlen++;
+               }
 
                skb = next;
        }
-       spin_unlock(lock);
-
+       if (lock)
+               spin_unlock(lock);
        __netif_schedule(q);
-
-       return 0;
-}
-
-static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
-{
-       if (q->flags & TCQ_F_NOLOCK)
-               return dev_requeue_skb_locked(skb, q);
-       else
-               return __dev_requeue_skb(skb, q);
 }
 
 static void try_bulk_dequeue_skb(struct Qdisc *q,
@@ -252,7 +236,7 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
                        skb = __skb_dequeue(&q->gso_skb);
                        if (qdisc_is_percpu_stats(q)) {
                                qdisc_qstats_cpu_backlog_dec(q, skb);
-                               qdisc_qstats_atomic_qlen_dec(q);
+                               qdisc_qstats_cpu_qlen_dec(q);
                        } else {
                                qdisc_qstats_backlog_dec(q, skb);
                                q->q.qlen--;
@@ -645,11 +629,7 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
        if (unlikely(err))
                return qdisc_drop_cpu(skb, qdisc, to_free);
 
-       qdisc_qstats_atomic_qlen_inc(qdisc);
-       /* Note: skb can not be used after skb_array_produce(),
-        * so we better not use qdisc_qstats_cpu_backlog_inc()
-        */
-       this_cpu_add(qdisc->cpu_qstats->backlog, pkt_len);
+       qdisc_update_stats_at_enqueue(qdisc, pkt_len);
        return NET_XMIT_SUCCESS;
 }
 
@@ -668,9 +648,7 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
                skb = __skb_array_consume(q);
        }
        if (likely(skb)) {
-               qdisc_qstats_cpu_backlog_dec(qdisc, skb);
-               qdisc_bstats_cpu_update(qdisc, skb);
-               qdisc_qstats_atomic_qlen_dec(qdisc);
+               qdisc_update_stats_at_dequeue(qdisc, skb);
        } else {
                qdisc->empty = true;
        }
@@ -716,6 +694,7 @@ static void pfifo_fast_reset(struct Qdisc *qdisc)
                struct gnet_stats_queue *q = per_cpu_ptr(qdisc->cpu_qstats, i);
 
                q->backlog = 0;
+               q->qlen = 0;
        }
 }
 
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 24cc220a3218aee4f6c44ed271050c0b8d137ec9..d2ab463f22ae8b122ae43d1969cf795fb11c05b3 100644
@@ -844,16 +844,6 @@ qdisc_peek_len(struct Qdisc *sch)
        return len;
 }
 
-static void
-hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
-{
-       unsigned int len = cl->qdisc->q.qlen;
-       unsigned int backlog = cl->qdisc->qstats.backlog;
-
-       qdisc_reset(cl->qdisc);
-       qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
-}
-
 static void
 hfsc_adjust_levels(struct hfsc_class *cl)
 {
@@ -1076,7 +1066,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
        qdisc_class_hash_insert(&q->clhash, &cl->cl_common);
        list_add_tail(&cl->siblings, &parent->children);
        if (parent->level == 0)
-               hfsc_purge_queue(sch, parent);
+               qdisc_purge_queue(parent->qdisc);
        hfsc_adjust_levels(parent);
        sch_tree_unlock(sch);
 
@@ -1112,7 +1102,7 @@ hfsc_delete_class(struct Qdisc *sch, unsigned long arg)
        list_del(&cl->siblings);
        hfsc_adjust_levels(cl->cl_parent);
 
-       hfsc_purge_queue(sch, cl);
+       qdisc_purge_queue(cl->qdisc);
        qdisc_class_hash_remove(&q->clhash, &cl->cl_common);
 
        sch_tree_unlock(sch);
@@ -1328,8 +1318,9 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 {
        struct hfsc_class *cl = (struct hfsc_class *)arg;
        struct tc_hfsc_stats xstats;
+       __u32 qlen;
 
-       cl->qstats.backlog = cl->qdisc->qstats.backlog;
+       qdisc_qstats_qlen_backlog(cl->qdisc, &qlen, &cl->qstats.backlog);
        xstats.level   = cl->level;
        xstats.period  = cl->cl_vtperiod;
        xstats.work    = cl->cl_total;
@@ -1337,7 +1328,7 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 
        if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d, NULL, &cl->bstats) < 0 ||
            gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
-           gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->qdisc->q.qlen) < 0)
+           gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)
                return -1;
 
        return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 30f9da7e1076368f2b0718d2bb0e1e3c5432998c..2f9883b196e8e6b10abd9b623b6285274a003ff6 100644
@@ -1127,10 +1127,9 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
        };
        __u32 qlen = 0;
 
-       if (!cl->level && cl->leaf.q) {
-               qlen = cl->leaf.q->q.qlen;
-               qs.backlog = cl->leaf.q->qstats.backlog;
-       }
+       if (!cl->level && cl->leaf.q)
+               qdisc_qstats_qlen_backlog(cl->leaf.q, &qlen, &qs.backlog);
+
        cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens),
                                    INT_MIN, INT_MAX);
        cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens),
@@ -1270,13 +1269,8 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
 
        sch_tree_lock(sch);
 
-       if (!cl->level) {
-               unsigned int qlen = cl->leaf.q->q.qlen;
-               unsigned int backlog = cl->leaf.q->qstats.backlog;
-
-               qdisc_reset(cl->leaf.q);
-               qdisc_tree_reduce_backlog(cl->leaf.q, qlen, backlog);
-       }
+       if (!cl->level)
+               qdisc_purge_queue(cl->leaf.q);
 
        /* delete from hash and active; remainder in destroy_class */
        qdisc_class_hash_remove(&q->clhash, &cl->common);
@@ -1404,12 +1398,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
                                          classid, NULL);
                sch_tree_lock(sch);
                if (parent && !parent->level) {
-                       unsigned int qlen = parent->leaf.q->q.qlen;
-                       unsigned int backlog = parent->leaf.q->qstats.backlog;
-
                        /* turn parent into inner node */
-                       qdisc_reset(parent->leaf.q);
-                       qdisc_tree_reduce_backlog(parent->leaf.q, qlen, backlog);
+                       qdisc_purge_queue(parent->leaf.q);
                        qdisc_put(parent->leaf.q);
                        if (parent->prio_activity)
                                htb_deactivate(q, parent);
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index 203659bc3906419f6a00edca96561efb503d608d..3a3312467692c4f17bc03a78299322ac9a67250c 100644
@@ -249,7 +249,7 @@ static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 
        sch = dev_queue->qdisc_sleeping;
        if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
-           gnet_stats_copy_queue(d, NULL, &sch->qstats, sch->q.qlen) < 0)
+           qdisc_qstats_copy(d, sch) < 0)
                return -1;
        return 0;
 }
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index d364e63c396d78fe8866b9a8d7aa1ec9b281814e..ea0dc112b38dd4ac43d0fdc15f4742583c380991 100644
@@ -561,8 +561,7 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
                sch = dev_queue->qdisc_sleeping;
                if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
                                          d, NULL, &sch->bstats) < 0 ||
-                   gnet_stats_copy_queue(d, NULL,
-                                         &sch->qstats, sch->q.qlen) < 0)
+                   qdisc_qstats_copy(d, sch) < 0)
                        return -1;
        }
        return 0;
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 7410ce4d03213d315696ec933722edbc6c542f2a..35b03ae08e0f1f8afbd10f6f4c1d6078e22a48d1 100644
@@ -201,9 +201,9 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt,
        for (i = q->bands; i < q->max_bands; i++) {
                if (q->queues[i] != &noop_qdisc) {
                        struct Qdisc *child = q->queues[i];
+
                        q->queues[i] = &noop_qdisc;
-                       qdisc_tree_reduce_backlog(child, child->q.qlen,
-                                                 child->qstats.backlog);
+                       qdisc_tree_flush_backlog(child);
                        qdisc_put(child);
                }
        }
@@ -225,9 +225,7 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt,
                                        qdisc_hash_add(child, true);
 
                                if (old != &noop_qdisc) {
-                                       qdisc_tree_reduce_backlog(old,
-                                                                 old->q.qlen,
-                                                                 old->qstats.backlog);
+                                       qdisc_tree_flush_backlog(old);
                                        qdisc_put(old);
                                }
                                sch_tree_unlock(sch);
@@ -344,7 +342,7 @@ static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
        cl_q = q->queues[cl - 1];
        if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
                                  d, NULL, &cl_q->bstats) < 0 ||
-           gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
+           qdisc_qstats_copy(d, cl_q) < 0)
                return -1;
 
        return 0;
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 847141cd900f1933f0b48684085c747f06c092c1..d519b21535b36b1f163460593573cb018cd1a904 100644
@@ -216,12 +216,8 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt,
        q->bands = qopt->bands;
        memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);
 
-       for (i = q->bands; i < oldbands; i++) {
-               struct Qdisc *child = q->queues[i];
-
-               qdisc_tree_reduce_backlog(child, child->q.qlen,
-                                         child->qstats.backlog);
-       }
+       for (i = q->bands; i < oldbands; i++)
+               qdisc_tree_flush_backlog(q->queues[i]);
 
        for (i = oldbands; i < q->bands; i++) {
                q->queues[i] = queues[i];
@@ -365,7 +361,7 @@ static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
        cl_q = q->queues[cl - 1];
        if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
                                  d, NULL, &cl_q->bstats) < 0 ||
-           gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
+           qdisc_qstats_copy(d, cl_q) < 0)
                return -1;
 
        return 0;
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 29f5c4a2468829457ddf734aa1e7711ebfe4bcc8..1589364b54da11dc241212dee190dad741d9d9bc 100644
@@ -217,15 +217,6 @@ static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
        return container_of(clc, struct qfq_class, common);
 }
 
-static void qfq_purge_queue(struct qfq_class *cl)
-{
-       unsigned int len = cl->qdisc->q.qlen;
-       unsigned int backlog = cl->qdisc->qstats.backlog;
-
-       qdisc_reset(cl->qdisc);
-       qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
-}
-
 static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = {
        [TCA_QFQ_WEIGHT] = { .type = NLA_U32 },
        [TCA_QFQ_LMAX] = { .type = NLA_U32 },
@@ -551,7 +542,7 @@ static int qfq_delete_class(struct Qdisc *sch, unsigned long arg)
 
        sch_tree_lock(sch);
 
-       qfq_purge_queue(cl);
+       qdisc_purge_queue(cl->qdisc);
        qdisc_class_hash_remove(&q->clhash, &cl->common);
 
        sch_tree_unlock(sch);
@@ -655,8 +646,7 @@ static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
        if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
                                  d, NULL, &cl->bstats) < 0 ||
            gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
-           gnet_stats_copy_queue(d, NULL,
-                                 &cl->qdisc->qstats, cl->qdisc->q.qlen) < 0)
+           qdisc_qstats_copy(d, cl->qdisc) < 0)
                return -1;
 
        return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 9df9942340eaaa30ed38fc3345649f287a373bee..4e8c0abf619459f396b91fc587271b7938e48330 100644
@@ -233,8 +233,7 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt,
        q->flags = ctl->flags;
        q->limit = ctl->limit;
        if (child) {
-               qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
-                                         q->qdisc->qstats.backlog);
+               qdisc_tree_flush_backlog(q->qdisc);
                old_child = q->qdisc;
                q->qdisc = child;
        }
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index bab506b01a32950d2ac07ff04815424705c62503..2419fdb759667a5c124f2018a310aabe9318b257 100644
@@ -521,8 +521,7 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt,
                qdisc_hash_add(child, true);
        sch_tree_lock(sch);
 
-       qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
-                                 q->qdisc->qstats.backlog);
+       qdisc_tree_flush_backlog(q->qdisc);
        qdisc_put(q->qdisc);
        q->qdisc = child;
 
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index 206e4dbed12f0e08a2c8782fedac1247995001bb..1b0fb80162e6076b7d2e1c29b9ba73ed8bfd9c4e 100644
@@ -20,6 +20,9 @@
 #include <net/pkt_cls.h>
 #include <net/sch_generic.h>
 
+static LIST_HEAD(taprio_list);
+static DEFINE_SPINLOCK(taprio_list_lock);
+
 #define TAPRIO_ALL_GATES_OPEN -1
 
 struct sched_entry {
@@ -42,9 +45,9 @@ struct taprio_sched {
        struct Qdisc *root;
        s64 base_time;
        int clockid;
-       int picos_per_byte; /* Using picoseconds because for 10Gbps+
-                            * speeds it's sub-nanoseconds per byte
-                            */
+       atomic64_t picos_per_byte; /* Using picoseconds because for 10Gbps+
+                                   * speeds it's sub-nanoseconds per byte
+                                   */
        size_t num_entries;
 
        /* Protects the update side of the RCU protected current_entry */
@@ -53,6 +56,7 @@ struct taprio_sched {
        struct list_head entries;
        ktime_t (*get_time)(void);
        struct hrtimer advance_timer;
+       struct list_head taprio_list;
 };
 
 static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
@@ -117,7 +121,7 @@ static struct sk_buff *taprio_peek(struct Qdisc *sch)
 
 static inline int length_to_duration(struct taprio_sched *q, int len)
 {
-       return (len * q->picos_per_byte) / 1000;
+       return (len * atomic64_read(&q->picos_per_byte)) / 1000;
 }
 
 static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
@@ -129,6 +133,11 @@ static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
        u32 gate_mask;
        int i;
 
+       if (atomic64_read(&q->picos_per_byte) == -1) {
+               WARN_ONCE(1, "taprio: dequeue() called with unknown picos per byte.");
+               return NULL;
+       }
+
        rcu_read_lock();
        entry = rcu_dereference(q->current_entry);
        /* if there's no entry, it means that the schedule didn't
@@ -233,7 +242,7 @@ static enum hrtimer_restart advance_sched(struct hrtimer *timer)
 
        next->close_time = close_time;
        atomic_set(&next->budget,
-                  (next->interval * 1000) / q->picos_per_byte);
+                  (next->interval * 1000) / atomic64_read(&q->picos_per_byte));
 
 first_run:
        rcu_assign_pointer(q->current_entry, next);
@@ -567,7 +576,8 @@ static void taprio_start_sched(struct Qdisc *sch, ktime_t start)
 
        first->close_time = ktime_add_ns(start, first->interval);
        atomic_set(&first->budget,
-                  (first->interval * 1000) / q->picos_per_byte);
+                  (first->interval * 1000) /
+                  atomic64_read(&q->picos_per_byte));
        rcu_assign_pointer(q->current_entry, NULL);
 
        spin_unlock_irqrestore(&q->current_entry_lock, flags);
@@ -575,6 +585,52 @@ static void taprio_start_sched(struct Qdisc *sch, ktime_t start)
        hrtimer_start(&q->advance_timer, start, HRTIMER_MODE_ABS);
 }
 
+static void taprio_set_picos_per_byte(struct net_device *dev,
+                                     struct taprio_sched *q)
+{
+       struct ethtool_link_ksettings ecmd;
+       int picos_per_byte = -1;
+
+       if (!__ethtool_get_link_ksettings(dev, &ecmd) &&
+           ecmd.base.speed != SPEED_UNKNOWN)
+               picos_per_byte = div64_s64(NSEC_PER_SEC * 1000LL * 8,
+                                          (s64)ecmd.base.speed * 1000 * 1000);
+
+       atomic64_set(&q->picos_per_byte, picos_per_byte);
+       netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n",
+                  dev->name, (long long)atomic64_read(&q->picos_per_byte),
+                  ecmd.base.speed);
+}
+
+static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event,
+                              void *ptr)
+{
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+       struct net_device *qdev;
+       struct taprio_sched *q;
+       bool found = false;
+
+       ASSERT_RTNL();
+
+       if (event != NETDEV_UP && event != NETDEV_CHANGE)
+               return NOTIFY_DONE;
+
+       spin_lock(&taprio_list_lock);
+       list_for_each_entry(q, &taprio_list, taprio_list) {
+               qdev = qdisc_dev(q->root);
+               if (qdev == dev) {
+                       found = true;
+                       break;
+               }
+       }
+       spin_unlock(&taprio_list_lock);
+
+       if (found)
+               taprio_set_picos_per_byte(dev, q);
+
+       return NOTIFY_DONE;
+}
+
 static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
                         struct netlink_ext_ack *extack)
 {
@@ -582,9 +638,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
        struct taprio_sched *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
        struct tc_mqprio_qopt *mqprio = NULL;
-       struct ethtool_link_ksettings ecmd;
        int i, err, size;
-       s64 link_speed;
        ktime_t start;
 
        err = nla_parse_nested(tb, TCA_TAPRIO_ATTR_MAX, opt,
@@ -657,14 +711,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
                                               mqprio->prio_tc_map[i]);
        }
 
-       if (!__ethtool_get_link_ksettings(dev, &ecmd))
-               link_speed = ecmd.base.speed;
-       else
-               link_speed = SPEED_1000;
-
-       q->picos_per_byte = div64_s64(NSEC_PER_SEC * 1000LL * 8,
-                                     link_speed * 1000 * 1000);
-
+       taprio_set_picos_per_byte(dev, q);
        start = taprio_get_start_time(sch);
        if (!start)
                return 0;
@@ -681,6 +728,10 @@ static void taprio_destroy(struct Qdisc *sch)
        struct sched_entry *entry, *n;
        unsigned int i;
 
+       spin_lock(&taprio_list_lock);
+       list_del(&q->taprio_list);
+       spin_unlock(&taprio_list_lock);
+
        hrtimer_cancel(&q->advance_timer);
 
        if (q->qdiscs) {
@@ -735,6 +786,10 @@ static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
        if (!opt)
                return -EINVAL;
 
+       spin_lock(&taprio_list_lock);
+       list_add(&q->taprio_list, &taprio_list);
+       spin_unlock(&taprio_list_lock);
+
        return taprio_change(sch, opt, extack);
 }
 
@@ -895,7 +950,7 @@ static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 
        sch = dev_queue->qdisc_sleeping;
        if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
-           gnet_stats_copy_queue(d, NULL, &sch->qstats, sch->q.qlen) < 0)
+           qdisc_qstats_copy(d, sch) < 0)
                return -1;
        return 0;
 }
@@ -947,14 +1002,24 @@ static struct Qdisc_ops taprio_qdisc_ops __read_mostly = {
        .owner          = THIS_MODULE,
 };
 
+static struct notifier_block taprio_device_notifier = {
+       .notifier_call = taprio_dev_notifier,
+};
+
 static int __init taprio_module_init(void)
 {
+       int err = register_netdevice_notifier(&taprio_device_notifier);
+
+       if (err)
+               return err;
+
        return register_qdisc(&taprio_qdisc_ops);
 }
 
 static void __exit taprio_module_exit(void)
 {
        unregister_qdisc(&taprio_qdisc_ops);
+       unregister_netdevice_notifier(&taprio_device_notifier);
 }
 
 module_init(taprio_module_init);
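
For reference, the arithmetic in taprio_set_picos_per_byte() works out as follows (worked numbers, not part of the patch; speed_mbps is an illustrative name for the ethtool-reported speed):

    /* picos_per_byte = (1e9 ns/s * 1000 ps/ns * 8 bits/byte)
     *                / (speed_mbps * 1e6 bits/s)
     *
     *   1 Gbps  -> 8e12 / 1e9  = 8000 ps/byte (8 ns/byte)
     *   10 Gbps -> 8e12 / 1e10 =  800 ps/byte -- sub-nanosecond, which is
     *              why the field is kept in picoseconds at all
     */
    s64 picos_per_byte = div64_s64(NSEC_PER_SEC * 1000LL * 8,
                                   (s64)speed_mbps * 1000 * 1000);

    /* an entry's byte budget then falls out of its interval (in ns),
     * exactly as advance_sched() and taprio_start_sched() compute it: */
    atomic_set(&entry->budget, (entry->interval * 1000) / picos_per_byte);
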
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 7f272a9070c5753e61dd140eca77afe4d17d6692..f71578dbb9e39292329e06d98535c764043acd55 100644
@@ -391,8 +391,7 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
 
        sch_tree_lock(sch);
        if (child) {
-               qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
-                                         q->qdisc->qstats.backlog);
+               qdisc_tree_flush_backlog(q->qdisc);
                qdisc_put(q->qdisc);
                q->qdisc = child;
        }
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 6abc8b274270730e482730bbc3ef735a7ffd2e52..951afdeea5e92c7cab48f53482d307c3f9893d89 100644
@@ -600,6 +600,7 @@ static struct sock *sctp_v4_create_accept_sk(struct sock *sk,
 static int sctp_v4_addr_to_user(struct sctp_sock *sp, union sctp_addr *addr)
 {
        /* No address mapping for V4 sockets */
+       memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
        return sizeof(struct sockaddr_in);
 }
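
The one-line sctp_v4_addr_to_user() change above is a kernel-to-user infoleak fix: the returned length tells the caller to copy sizeof(struct sockaddr_in) bytes out, but nothing guaranteed that the trailing pad of the address had been initialized. In the uapi definition that pad is real storage, roughly (linux/in.h):

    struct sockaddr_in {
            __kernel_sa_family_t  sin_family;   /* AF_INET */
            __be16                sin_port;
            struct in_addr        sin_addr;
            /* pad to sizeof(struct sockaddr); exposed as sin_zero */
            unsigned char         __pad[8];
    };
    #define sin_zero  __pad

so the memset() zeroes exactly the 8 bytes that could otherwise carry stale kernel memory to user space.
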
 
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index c9ae3404b1bb11572e34255cb3eae86ca1dd8131..7dfc34b28f4fb3e98086963afdc636e8c511d58a 100644
@@ -6412,13 +6412,15 @@ static int sctp_eat_data(const struct sctp_association *asoc,
         * in sctp_ulpevent_make_rcvmsg will drop the frame if we grow our
         * memory usage too much
         */
-       if (*sk->sk_prot_creator->memory_pressure) {
+       if (sk_under_memory_pressure(sk)) {
                if (sctp_tsnmap_has_gap(map) &&
                    (sctp_tsnmap_get_ctsn(map) + 1) == tsn) {
                        pr_debug("%s: under pressure, reneging for tsn:%u\n",
                                 __func__, tsn);
                        deliver = SCTP_CMD_RENEGE;
-                }
+               } else {
+                       sk_mem_reclaim(sk);
+               }
        }
 
        /*
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 9874e60c9b0d00924042c1b377bc0c777edfc4cb..e4e892cc5644811b876d5a1cbd25e4fe3ef30136 100644
@@ -1913,7 +1913,10 @@ static int sctp_sendmsg_to_asoc(struct sctp_association *asoc,
        if (sctp_wspace(asoc) < (int)msg_len)
                sctp_prsctp_prune(asoc, sinfo, msg_len - sctp_wspace(asoc));
 
-       if (sctp_wspace(asoc) <= 0) {
+       if (sk_under_memory_pressure(sk))
+               sk_mem_reclaim(sk);
+
+       if (sctp_wspace(asoc) <= 0 || !sk_wmem_schedule(sk, msg_len)) {
                timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
                err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
                if (err)
@@ -4847,7 +4850,8 @@ static int sctp_connect(struct sock *sk, struct sockaddr *addr,
        }
 
        /* Validate addr_len before calling common connect/connectx routine. */
-       af = sctp_get_af_specific(addr->sa_family);
+       af = addr_len < offsetofend(struct sockaddr, sa_family) ? NULL :
+               sctp_get_af_specific(addr->sa_family);
        if (!af || addr_len < af->sockaddr_len) {
                err = -EINVAL;
        } else {
@@ -8930,7 +8934,10 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
                        goto do_error;
                if (signal_pending(current))
                        goto do_interrupted;
-               if ((int)msg_len <= sctp_wspace(asoc))
+               if (sk_under_memory_pressure(sk))
+                       sk_mem_reclaim(sk);
+               if ((int)msg_len <= sctp_wspace(asoc) &&
+                   sk_wmem_schedule(sk, msg_len))
                        break;
 
                /* Let another process have a go.  Since we are going
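
The sctp_connect() hunk above hardens the address-family lookup: addr->sa_family is now read only after checking that the user-supplied addr_len covers that field, where previously the family was read before any length validation. offsetofend() is the stock helper from include/linux/stddef.h, roughly:

    /* first byte past MEMBER; offsetofend(struct sockaddr, sa_family)
     * is 2, the minimum addr_len at which sa_family may be read */
    #define offsetofend(TYPE, MEMBER) \
            (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))
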
diff --git a/net/sctp/stream_interleave.c b/net/sctp/stream_interleave.c
index 102c6fefe38c93be2760ca7df0adfe62bead1313..25e0b7e5189c5be42fcd0dd8f669baedd921abdd 100644
@@ -484,14 +484,15 @@ static struct sctp_ulpevent *sctp_intl_order(struct sctp_ulpq *ulpq,
 }
 
 static int sctp_enqueue_event(struct sctp_ulpq *ulpq,
-                             struct sctp_ulpevent *event)
+                             struct sk_buff_head *skb_list)
 {
-       struct sk_buff *skb = sctp_event2skb(event);
        struct sock *sk = ulpq->asoc->base.sk;
        struct sctp_sock *sp = sctp_sk(sk);
-       struct sk_buff_head *skb_list;
+       struct sctp_ulpevent *event;
+       struct sk_buff *skb;
 
-       skb_list = (struct sk_buff_head *)skb->prev;
+       skb = __skb_peek(skb_list);
+       event = sctp_skb2event(skb);
 
        if (sk->sk_shutdown & RCV_SHUTDOWN &&
            (sk->sk_shutdown & SEND_SHUTDOWN ||
@@ -858,19 +859,24 @@ static int sctp_ulpevent_idata(struct sctp_ulpq *ulpq,
 
        if (!(event->msg_flags & SCTP_DATA_UNORDERED)) {
                event = sctp_intl_reasm(ulpq, event);
-               if (event && event->msg_flags & MSG_EOR) {
+               if (event) {
                        skb_queue_head_init(&temp);
                        __skb_queue_tail(&temp, sctp_event2skb(event));
 
-                       event = sctp_intl_order(ulpq, event);
+                       if (event->msg_flags & MSG_EOR)
+                               event = sctp_intl_order(ulpq, event);
                }
        } else {
                event = sctp_intl_reasm_uo(ulpq, event);
+               if (event) {
+                       skb_queue_head_init(&temp);
+                       __skb_queue_tail(&temp, sctp_event2skb(event));
+               }
        }
 
        if (event) {
                event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
-               sctp_enqueue_event(ulpq, event);
+               sctp_enqueue_event(ulpq, &temp);
        }
 
        return event_eor;
@@ -944,20 +950,27 @@ static struct sctp_ulpevent *sctp_intl_retrieve_first(struct sctp_ulpq *ulpq)
 static void sctp_intl_start_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
 {
        struct sctp_ulpevent *event;
+       struct sk_buff_head temp;
 
        if (!skb_queue_empty(&ulpq->reasm)) {
                do {
                        event = sctp_intl_retrieve_first(ulpq);
-                       if (event)
-                               sctp_enqueue_event(ulpq, event);
+                       if (event) {
+                               skb_queue_head_init(&temp);
+                               __skb_queue_tail(&temp, sctp_event2skb(event));
+                               sctp_enqueue_event(ulpq, &temp);
+                       }
                } while (event);
        }
 
        if (!skb_queue_empty(&ulpq->reasm_uo)) {
                do {
                        event = sctp_intl_retrieve_first_uo(ulpq);
-                       if (event)
-                               sctp_enqueue_event(ulpq, event);
+                       if (event) {
+                               skb_queue_head_init(&temp);
+                               __skb_queue_tail(&temp, sctp_event2skb(event));
+                               sctp_enqueue_event(ulpq, &temp);
+                       }
                } while (event);
        }
 }
@@ -1059,7 +1072,7 @@ static void sctp_intl_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
 
        if (event) {
                sctp_intl_retrieve_ordered(ulpq, event);
-               sctp_enqueue_event(ulpq, event);
+               sctp_enqueue_event(ulpq, &temp);
        }
 }
 
@@ -1298,6 +1311,15 @@ static void sctp_handle_iftsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk)
                               ntohl(skip->mid), skip->flags);
 }
 
+static int do_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
+{
+       struct sk_buff_head temp;
+
+       skb_queue_head_init(&temp);
+       __skb_queue_tail(&temp, sctp_event2skb(event));
+       return sctp_ulpq_tail_event(ulpq, &temp);
+}
+
 static struct sctp_stream_interleave sctp_stream_interleave_0 = {
        .data_chunk_len         = sizeof(struct sctp_data_chunk),
        .ftsn_chunk_len         = sizeof(struct sctp_fwdtsn_chunk),
@@ -1306,7 +1328,7 @@ static struct sctp_stream_interleave sctp_stream_interleave_0 = {
        .assign_number          = sctp_chunk_assign_ssn,
        .validate_data          = sctp_validate_data,
        .ulpevent_data          = sctp_ulpq_tail_data,
-       .enqueue_event          = sctp_ulpq_tail_event,
+       .enqueue_event          = do_ulpq_tail_event,
        .renege_events          = sctp_ulpq_renege,
        .start_pd               = sctp_ulpq_partial_delivery,
        .abort_pd               = sctp_ulpq_abort_pd,
@@ -1317,6 +1339,16 @@ static struct sctp_stream_interleave sctp_stream_interleave_0 = {
        .handle_ftsn            = sctp_handle_fwdtsn,
 };
 
+static int do_sctp_enqueue_event(struct sctp_ulpq *ulpq,
+                                struct sctp_ulpevent *event)
+{
+       struct sk_buff_head temp;
+
+       skb_queue_head_init(&temp);
+       __skb_queue_tail(&temp, sctp_event2skb(event));
+       return sctp_enqueue_event(ulpq, &temp);
+}
+
 static struct sctp_stream_interleave sctp_stream_interleave_1 = {
        .data_chunk_len         = sizeof(struct sctp_idata_chunk),
        .ftsn_chunk_len         = sizeof(struct sctp_ifwdtsn_chunk),
@@ -1325,7 +1357,7 @@ static struct sctp_stream_interleave sctp_stream_interleave_1 = {
        .assign_number          = sctp_chunk_assign_mid,
        .validate_data          = sctp_validate_idata,
        .ulpevent_data          = sctp_ulpevent_idata,
-       .enqueue_event          = sctp_enqueue_event,
+       .enqueue_event          = do_sctp_enqueue_event,
        .renege_events          = sctp_renege_events,
        .start_pd               = sctp_intl_start_pd,
        .abort_pd               = sctp_intl_abort_pd,
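
A pattern shared by the sctp hunks above and below: sctp_enqueue_event() and sctp_ulpq_tail_event() used to recover the list head by reading sctp_event2skb(event)->prev, an invariant that only held by accident of sk_buff list layout. Both now take an explicit sk_buff_head, and single events get wrapped in an on-stack list before delivery, as the do_* shims above do:

    struct sk_buff_head temp;

    skb_queue_head_init(&temp);                       /* on-stack list head */
    __skb_queue_tail(&temp, sctp_event2skb(event));   /* one-element list */
    sctp_enqueue_event(ulpq, &temp);                  /* callee peeks the head
                                                       * and just splices the
                                                       * whole list */
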
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 8cb7d9858270a617e46e32e988babf86196ef84c..c2a7478587ab4fe53e3e30178b604b3eb46c5f8d 100644
@@ -634,8 +634,9 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
                                                gfp_t gfp)
 {
        struct sctp_ulpevent *event = NULL;
-       struct sk_buff *skb;
-       size_t padding, len;
+       struct sk_buff *skb = chunk->skb;
+       struct sock *sk = asoc->base.sk;
+       size_t padding, datalen;
        int rx_count;
 
        /*
@@ -646,15 +647,12 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
        if (asoc->ep->rcvbuf_policy)
                rx_count = atomic_read(&asoc->rmem_alloc);
        else
-               rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);
+               rx_count = atomic_read(&sk->sk_rmem_alloc);
 
-       if (rx_count >= asoc->base.sk->sk_rcvbuf) {
+       datalen = ntohs(chunk->chunk_hdr->length);
 
-               if ((asoc->base.sk->sk_userlocks & SOCK_RCVBUF_LOCK) ||
-                   (!sk_rmem_schedule(asoc->base.sk, chunk->skb,
-                                      chunk->skb->truesize)))
-                       goto fail;
-       }
+       if (rx_count >= sk->sk_rcvbuf || !sk_rmem_schedule(sk, skb, datalen))
+               goto fail;
 
        /* Clone the original skb, sharing the data.  */
        skb = skb_clone(chunk->skb, gfp);
@@ -681,8 +679,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
         * The sender should never pad with more than 3 bytes.  The receiver
         * MUST ignore the padding bytes.
         */
-       len = ntohs(chunk->chunk_hdr->length);
-       padding = SCTP_PAD4(len) - len;
+       padding = SCTP_PAD4(datalen) - datalen;
 
        /* Fixup cloned skb with just this chunks data.  */
        skb_trim(skb, chunk->chunk_end - padding - skb->data);
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 5dde92101743768e05682991759b5713ecbde906..a212fe079c07e17dd533b71bc3dbb5526c89d8c0 100644
@@ -116,12 +116,13 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
        event = sctp_ulpq_reasm(ulpq, event);
 
        /* Do ordering if needed.  */
-       if ((event) && (event->msg_flags & MSG_EOR)) {
+       if (event) {
                /* Create a temporary list to collect chunks on.  */
                skb_queue_head_init(&temp);
                __skb_queue_tail(&temp, sctp_event2skb(event));
 
-               event = sctp_ulpq_order(ulpq, event);
+               if (event->msg_flags & MSG_EOR)
+                       event = sctp_ulpq_order(ulpq, event);
        }
 
        /* Send event to the ULP.  'event' is the sctp_ulpevent for
@@ -129,7 +130,7 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
         */
        if (event) {
                event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
-               sctp_ulpq_tail_event(ulpq, event);
+               sctp_ulpq_tail_event(ulpq, &temp);
        }
 
        return event_eor;
@@ -193,18 +194,17 @@ static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
        return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
 }
 
-/* If the SKB of 'event' is on a list, it is the first such member
- * of that list.
- */
-int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
+int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sk_buff_head *skb_list)
 {
        struct sock *sk = ulpq->asoc->base.sk;
        struct sctp_sock *sp = sctp_sk(sk);
-       struct sk_buff_head *queue, *skb_list;
-       struct sk_buff *skb = sctp_event2skb(event);
+       struct sctp_ulpevent *event;
+       struct sk_buff_head *queue;
+       struct sk_buff *skb;
        int clear_pd = 0;
 
-       skb_list = (struct sk_buff_head *) skb->prev;
+       skb = __skb_peek(skb_list);
+       event = sctp_skb2event(skb);
 
        /* If the socket is just going to throw this away, do not
         * even try to deliver it.
@@ -257,13 +257,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
                }
        }
 
-       /* If we are harvesting multiple skbs they will be
-        * collected on a list.
-        */
-       if (skb_list)
-               skb_queue_splice_tail_init(skb_list, queue);
-       else
-               __skb_queue_tail(queue, skb);
+       skb_queue_splice_tail_init(skb_list, queue);
 
        /* Did we just complete partial delivery and need to get
         * rolling again?  Move pending data to the receive
@@ -738,25 +732,25 @@ void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
 static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
 {
        struct sctp_ulpevent *event = NULL;
-       struct sk_buff_head temp;
 
        if (skb_queue_empty(&ulpq->reasm))
                return;
 
        while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
-               /* Do ordering if needed.  */
-               if ((event) && (event->msg_flags & MSG_EOR)) {
-                       skb_queue_head_init(&temp);
-                       __skb_queue_tail(&temp, sctp_event2skb(event));
+               struct sk_buff_head temp;
+
+               skb_queue_head_init(&temp);
+               __skb_queue_tail(&temp, sctp_event2skb(event));
 
+               /* Do ordering if needed.  */
+               if (event->msg_flags & MSG_EOR)
                        event = sctp_ulpq_order(ulpq, event);
-               }
 
                /* Send event to the ULP.  'event' is the
                 * sctp_ulpevent for the very first SKB on the 'temp' list.
                 */
                if (event)
-                       sctp_ulpq_tail_event(ulpq, event);
+                       sctp_ulpq_tail_event(ulpq, &temp);
        }
 }
 
@@ -956,7 +950,7 @@ static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
        if (event) {
                /* see if we have more ordered that we can deliver */
                sctp_ulpq_retrieve_ordered(ulpq, event);
-               sctp_ulpq_tail_event(ulpq, event);
+               sctp_ulpq_tail_event(ulpq, &temp);
        }
 }
 
@@ -1082,7 +1076,11 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
                event = sctp_ulpq_retrieve_first(ulpq);
                /* Send event to the ULP.   */
                if (event) {
-                       sctp_ulpq_tail_event(ulpq, event);
+                       struct sk_buff_head temp;
+
+                       skb_queue_head_init(&temp);
+                       __skb_queue_tail(&temp, sctp_event2skb(event));
+                       sctp_ulpq_tail_event(ulpq, &temp);
                        sctp_ulpq_set_pd(ulpq);
                        return;
                }
@@ -1106,7 +1104,8 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
                        freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
        }
        /* If able to free enough room, accept this chunk. */
-       if (freed >= needed) {
+       if (sk_rmem_schedule(asoc->base.sk, chunk->skb, needed) &&
+           freed >= needed) {
                int retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
                /*
                 * Enter partial delivery if chunk has not been
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 77ef53596d18c5fd091b6888efbc8b35063087a8..086d9913975ddeabcbfa1a80bd51501ee8d304f0 100644
@@ -134,11 +134,9 @@ static int smc_release(struct socket *sock)
        smc = smc_sk(sk);
 
        /* cleanup for a dangling non-blocking connect */
-       if (smc->connect_info && sk->sk_state == SMC_INIT)
+       if (smc->connect_nonblock && sk->sk_state == SMC_INIT)
                tcp_abort(smc->clcsock->sk, ECONNABORTED);
        flush_work(&smc->connect_work);
-       kfree(smc->connect_info);
-       smc->connect_info = NULL;
 
        if (sk->sk_state == SMC_LISTEN)
                /* smc_close_non_accepted() is called and acquires
@@ -167,10 +165,9 @@ static int smc_release(struct socket *sock)
 
        if (sk->sk_state == SMC_CLOSED) {
                if (smc->clcsock) {
-                       mutex_lock(&smc->clcsock_release_lock);
-                       sock_release(smc->clcsock);
-                       smc->clcsock = NULL;
-                       mutex_unlock(&smc->clcsock_release_lock);
+                       release_sock(sk);
+                       smc_clcsock_release(smc);
+                       lock_sock(sk);
                }
                if (!smc->use_fallback)
                        smc_conn_free(&smc->conn);
@@ -446,12 +443,22 @@ static void smc_link_save_peer_info(struct smc_link *link,
        link->peer_mtu = clc->qp_mtu;
 }
 
+static void smc_switch_to_fallback(struct smc_sock *smc)
+{
+       smc->use_fallback = true;
+       if (smc->sk.sk_socket && smc->sk.sk_socket->file) {
+               smc->clcsock->file = smc->sk.sk_socket->file;
+               smc->clcsock->file->private_data = smc->clcsock;
+       }
+}
+
 /* fall back during connect */
 static int smc_connect_fallback(struct smc_sock *smc, int reason_code)
 {
-       smc->use_fallback = true;
+       smc_switch_to_fallback(smc);
        smc->fallback_rsn = reason_code;
        smc_copy_sock_settings_to_clc(smc);
+       smc->connect_nonblock = 0;
        if (smc->sk.sk_state == SMC_INIT)
                smc->sk.sk_state = SMC_ACTIVE;
        return 0;
@@ -491,46 +498,41 @@ static int smc_connect_abort(struct smc_sock *smc, int reason_code,
                mutex_unlock(&smc_client_lgr_pending);
 
        smc_conn_free(&smc->conn);
+       smc->connect_nonblock = 0;
        return reason_code;
 }
 
 /* check if there is a rdma device available for this connection. */
 /* called for connect and listen */
-static int smc_check_rdma(struct smc_sock *smc, struct smc_ib_device **ibdev,
-                         u8 *ibport, unsigned short vlan_id, u8 gid[])
+static int smc_find_rdma_device(struct smc_sock *smc, struct smc_init_info *ini)
 {
-       int reason_code = 0;
-
        /* PNET table look up: search active ib_device and port
         * within same PNETID that also contains the ethernet device
         * used for the internal TCP socket
         */
-       smc_pnet_find_roce_resource(smc->clcsock->sk, ibdev, ibport, vlan_id,
-                                   gid);
-       if (!(*ibdev))
-               reason_code = SMC_CLC_DECL_CNFERR; /* configuration error */
-
-       return reason_code;
+       smc_pnet_find_roce_resource(smc->clcsock->sk, ini);
+       if (!ini->ib_dev)
+               return SMC_CLC_DECL_NOSMCRDEV;
+       return 0;
 }
 
 /* check if there is an ISM device available for this connection. */
 /* called for connect and listen */
-static int smc_check_ism(struct smc_sock *smc, struct smcd_dev **ismdev)
+static int smc_find_ism_device(struct smc_sock *smc, struct smc_init_info *ini)
 {
        /* Find ISM device with same PNETID as connecting interface  */
-       smc_pnet_find_ism_resource(smc->clcsock->sk, ismdev);
-       if (!(*ismdev))
-               return SMC_CLC_DECL_CNFERR; /* configuration error */
+       smc_pnet_find_ism_resource(smc->clcsock->sk, ini);
+       if (!ini->ism_dev)
+               return SMC_CLC_DECL_NOSMCDDEV;
        return 0;
 }
 
 /* Check for VLAN ID and register it on ISM device just for CLC handshake */
 static int smc_connect_ism_vlan_setup(struct smc_sock *smc,
-                                     struct smcd_dev *ismdev,
-                                     unsigned short vlan_id)
+                                     struct smc_init_info *ini)
 {
-       if (vlan_id && smc_ism_get_vlan(ismdev, vlan_id))
-               return SMC_CLC_DECL_CNFERR;
+       if (ini->vlan_id && smc_ism_get_vlan(ini->ism_dev, ini->vlan_id))
+               return SMC_CLC_DECL_ISMVLANERR;
        return 0;
 }
 
@@ -538,12 +540,11 @@ static int smc_connect_ism_vlan_setup(struct smc_sock *smc,
  * used, the VLAN ID will be registered again during the connection setup.
  */
 static int smc_connect_ism_vlan_cleanup(struct smc_sock *smc, bool is_smcd,
-                                       struct smcd_dev *ismdev,
-                                       unsigned short vlan_id)
+                                       struct smc_init_info *ini)
 {
        if (!is_smcd)
                return 0;
-       if (vlan_id && smc_ism_put_vlan(ismdev, vlan_id))
+       if (ini->vlan_id && smc_ism_put_vlan(ini->ism_dev, ini->vlan_id))
                return SMC_CLC_DECL_CNFERR;
        return 0;
 }
@@ -551,13 +552,12 @@ static int smc_connect_ism_vlan_cleanup(struct smc_sock *smc, bool is_smcd,
 /* CLC handshake during connect */
 static int smc_connect_clc(struct smc_sock *smc, int smc_type,
                           struct smc_clc_msg_accept_confirm *aclc,
-                          struct smc_ib_device *ibdev, u8 ibport,
-                          u8 gid[], struct smcd_dev *ismdev)
+                          struct smc_init_info *ini)
 {
        int rc = 0;
 
        /* do inband token exchange */
-       rc = smc_clc_send_proposal(smc, smc_type, ibdev, ibport, gid, ismdev);
+       rc = smc_clc_send_proposal(smc, smc_type, ini);
        if (rc)
                return rc;
        /* receive SMC Accept CLC message */
@@ -568,23 +568,19 @@ static int smc_connect_clc(struct smc_sock *smc, int smc_type,
 /* setup for RDMA connection of client */
 static int smc_connect_rdma(struct smc_sock *smc,
                            struct smc_clc_msg_accept_confirm *aclc,
-                           struct smc_ib_device *ibdev, u8 ibport)
+                           struct smc_init_info *ini)
 {
-       int local_contact = SMC_FIRST_CONTACT;
        struct smc_link *link;
        int reason_code = 0;
 
+       ini->is_smcd = false;
+       ini->ib_lcl = &aclc->lcl;
+       ini->ib_clcqpn = ntoh24(aclc->qpn);
+       ini->srv_first_contact = aclc->hdr.flag;
+
        mutex_lock(&smc_client_lgr_pending);
-       local_contact = smc_conn_create(smc, false, aclc->hdr.flag, ibdev,
-                                       ibport, ntoh24(aclc->qpn), &aclc->lcl,
-                                       NULL, 0);
-       if (local_contact < 0) {
-               if (local_contact == -ENOMEM)
-                       reason_code = SMC_CLC_DECL_MEM;/* insufficient memory*/
-               else if (local_contact == -ENOLINK)
-                       reason_code = SMC_CLC_DECL_SYNCERR; /* synchr. error */
-               else
-                       reason_code = SMC_CLC_DECL_INTERR; /* other error */
+       reason_code = smc_conn_create(smc, ini);
+       if (reason_code) {
                mutex_unlock(&smc_client_lgr_pending);
                return reason_code;
        }
@@ -594,45 +590,48 @@ static int smc_connect_rdma(struct smc_sock *smc,
 
        /* create send buffer and rmb */
        if (smc_buf_create(smc, false))
-               return smc_connect_abort(smc, SMC_CLC_DECL_MEM, local_contact);
+               return smc_connect_abort(smc, SMC_CLC_DECL_MEM,
+                                        ini->cln_first_contact);
 
-       if (local_contact == SMC_FIRST_CONTACT)
+       if (ini->cln_first_contact == SMC_FIRST_CONTACT)
                smc_link_save_peer_info(link, aclc);
 
        if (smc_rmb_rtoken_handling(&smc->conn, aclc))
                return smc_connect_abort(smc, SMC_CLC_DECL_ERR_RTOK,
-                                        local_contact);
+                                        ini->cln_first_contact);
 
        smc_close_init(smc);
        smc_rx_init(smc);
 
-       if (local_contact == SMC_FIRST_CONTACT) {
+       if (ini->cln_first_contact == SMC_FIRST_CONTACT) {
                if (smc_ib_ready_link(link))
                        return smc_connect_abort(smc, SMC_CLC_DECL_ERR_RDYLNK,
-                                                local_contact);
+                                                ini->cln_first_contact);
        } else {
                if (smc_reg_rmb(link, smc->conn.rmb_desc, true))
                        return smc_connect_abort(smc, SMC_CLC_DECL_ERR_REGRMB,
-                                                local_contact);
+                                                ini->cln_first_contact);
        }
        smc_rmb_sync_sg_for_device(&smc->conn);
 
        reason_code = smc_clc_send_confirm(smc);
        if (reason_code)
-               return smc_connect_abort(smc, reason_code, local_contact);
+               return smc_connect_abort(smc, reason_code,
+                                        ini->cln_first_contact);
 
        smc_tx_init(smc);
 
-       if (local_contact == SMC_FIRST_CONTACT) {
+       if (ini->cln_first_contact == SMC_FIRST_CONTACT) {
                /* QP confirmation over RoCE fabric */
                reason_code = smc_clnt_conf_first_link(smc);
                if (reason_code)
                        return smc_connect_abort(smc, reason_code,
-                                                local_contact);
+                                                ini->cln_first_contact);
        }
        mutex_unlock(&smc_client_lgr_pending);
 
        smc_copy_sock_settings_to_clc(smc);
+       smc->connect_nonblock = 0;
        if (smc->sk.sk_state == SMC_INIT)
                smc->sk.sk_state = SMC_ACTIVE;
 
@@ -642,23 +641,26 @@ static int smc_connect_rdma(struct smc_sock *smc,
 /* setup for ISM connection of client */
 static int smc_connect_ism(struct smc_sock *smc,
                           struct smc_clc_msg_accept_confirm *aclc,
-                          struct smcd_dev *ismdev)
+                          struct smc_init_info *ini)
 {
-       int local_contact = SMC_FIRST_CONTACT;
        int rc = 0;
 
+       ini->is_smcd = true;
+       ini->ism_gid = aclc->gid;
+       ini->srv_first_contact = aclc->hdr.flag;
+
        /* there is only one lgr role for SMC-D; use server lock */
        mutex_lock(&smc_server_lgr_pending);
-       local_contact = smc_conn_create(smc, true, aclc->hdr.flag, NULL, 0, 0,
-                                       NULL, ismdev, aclc->gid);
-       if (local_contact < 0) {
+       rc = smc_conn_create(smc, ini);
+       if (rc) {
                mutex_unlock(&smc_server_lgr_pending);
-               return SMC_CLC_DECL_MEM;
+               return rc;
        }
 
        /* Create send and receive buffers */
        if (smc_buf_create(smc, true))
-               return smc_connect_abort(smc, SMC_CLC_DECL_MEM, local_contact);
+               return smc_connect_abort(smc, SMC_CLC_DECL_MEM,
+                                        ini->cln_first_contact);
 
        smc_conn_save_peer_info(smc, aclc);
        smc_close_init(smc);
@@ -667,10 +669,11 @@ static int smc_connect_ism(struct smc_sock *smc,
 
        rc = smc_clc_send_confirm(smc);
        if (rc)
-               return smc_connect_abort(smc, rc, local_contact);
+               return smc_connect_abort(smc, rc, ini->cln_first_contact);
        mutex_unlock(&smc_server_lgr_pending);
 
        smc_copy_sock_settings_to_clc(smc);
+       smc->connect_nonblock = 0;
        if (smc->sk.sk_state == SMC_INIT)
                smc->sk.sk_state = SMC_ACTIVE;
 
@@ -682,13 +685,9 @@ static int __smc_connect(struct smc_sock *smc)
 {
        bool ism_supported = false, rdma_supported = false;
        struct smc_clc_msg_accept_confirm aclc;
-       struct smc_ib_device *ibdev;
-       struct smcd_dev *ismdev;
-       u8 gid[SMC_GID_SIZE];
-       unsigned short vlan;
+       struct smc_init_info ini = {0};
        int smc_type;
        int rc = 0;
-       u8 ibport;
 
        sock_hold(&smc->sk); /* sock put in passive closing */
 
@@ -703,20 +702,21 @@ static int __smc_connect(struct smc_sock *smc)
        if (using_ipsec(smc))
                return smc_connect_decline_fallback(smc, SMC_CLC_DECL_IPSEC);
 
-       /* check for VLAN ID */
-       if (smc_vlan_by_tcpsk(smc->clcsock, &vlan))
-               return smc_connect_decline_fallback(smc, SMC_CLC_DECL_CNFERR);
+       /* get vlan id from IP device */
+       if (smc_vlan_by_tcpsk(smc->clcsock, &ini))
+               return smc_connect_decline_fallback(smc,
+                                                   SMC_CLC_DECL_GETVLANERR);
 
        /* check if there is an ism device available */
-       if (!smc_check_ism(smc, &ismdev) &&
-           !smc_connect_ism_vlan_setup(smc, ismdev, vlan)) {
+       if (!smc_find_ism_device(smc, &ini) &&
+           !smc_connect_ism_vlan_setup(smc, &ini)) {
                /* ISM is supported for this connection */
                ism_supported = true;
                smc_type = SMC_TYPE_D;
        }
 
        /* check if there is a rdma device available */
-       if (!smc_check_rdma(smc, &ibdev, &ibport, vlan, gid)) {
+       if (!smc_find_rdma_device(smc, &ini)) {
                /* RDMA is supported for this connection */
                rdma_supported = true;
                if (ism_supported)
@@ -730,25 +730,25 @@ static int __smc_connect(struct smc_sock *smc)
                return smc_connect_decline_fallback(smc, SMC_CLC_DECL_NOSMCDEV);
 
        /* perform CLC handshake */
-       rc = smc_connect_clc(smc, smc_type, &aclc, ibdev, ibport, gid, ismdev);
+       rc = smc_connect_clc(smc, smc_type, &aclc, &ini);
        if (rc) {
-               smc_connect_ism_vlan_cleanup(smc, ism_supported, ismdev, vlan);
+               smc_connect_ism_vlan_cleanup(smc, ism_supported, &ini);
                return smc_connect_decline_fallback(smc, rc);
        }
 
        /* depending on previous steps, connect using rdma or ism */
        if (rdma_supported && aclc.hdr.path == SMC_TYPE_R)
-               rc = smc_connect_rdma(smc, &aclc, ibdev, ibport);
+               rc = smc_connect_rdma(smc, &aclc, &ini);
        else if (ism_supported && aclc.hdr.path == SMC_TYPE_D)
-               rc = smc_connect_ism(smc, &aclc, ismdev);
+               rc = smc_connect_ism(smc, &aclc, &ini);
        else
                rc = SMC_CLC_DECL_MODEUNSUPP;
        if (rc) {
-               smc_connect_ism_vlan_cleanup(smc, ism_supported, ismdev, vlan);
+               smc_connect_ism_vlan_cleanup(smc, ism_supported, &ini);
                return smc_connect_decline_fallback(smc, rc);
        }
 
-       smc_connect_ism_vlan_cleanup(smc, ism_supported, ismdev, vlan);
+       smc_connect_ism_vlan_cleanup(smc, ism_supported, &ini);
        return 0;
 }
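
The hunks above replace the long ibdev/ibport/gid/vlan parameter lists with a single struct smc_init_info that is threaded through the whole handshake. A minimal stand-alone sketch of that parameter-object pattern, using simplified stand-in types rather than the real SMC structures:

#include <stdio.h>
#include <string.h>

struct init_info {                 /* stand-in for struct smc_init_info */
	unsigned short vlan_id;
	unsigned char  ib_port;
	unsigned char  ib_gid[16];
	int            cln_first_contact;
};

/* Before the refactor each helper took vlan, port and gid separately;
 * now one struct is threaded through, so adding a field (such as
 * cln_first_contact) no longer touches every signature. */
static int find_rdma_device(struct init_info *ini)
{
	ini->ib_port = 1;              /* pretend a device matched */
	memset(ini->ib_gid, 0xab, sizeof(ini->ib_gid));
	return 0;
}

int main(void)
{
	struct init_info ini = {0};

	if (!find_rdma_device(&ini))
		printf("port %u on vlan %u\n", ini.ib_port, ini.vlan_id);
	return 0;
}

The payoff is visible in the later hunks: carrying cln_first_contact out of smc_conn_create() required no new parameters anywhere.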
 
@@ -756,17 +756,30 @@ static void smc_connect_work(struct work_struct *work)
 {
        struct smc_sock *smc = container_of(work, struct smc_sock,
                                            connect_work);
-       int rc;
+       long timeo = smc->sk.sk_sndtimeo;
+       int rc = 0;
 
-       lock_sock(&smc->sk);
-       rc = kernel_connect(smc->clcsock, &smc->connect_info->addr,
-                           smc->connect_info->alen, smc->connect_info->flags);
+       if (!timeo)
+               timeo = MAX_SCHEDULE_TIMEOUT;
+       lock_sock(smc->clcsock->sk);
        if (smc->clcsock->sk->sk_err) {
                smc->sk.sk_err = smc->clcsock->sk->sk_err;
-               goto out;
-       }
-       if (rc < 0) {
-               smc->sk.sk_err = -rc;
+       } else if ((1 << smc->clcsock->sk->sk_state) &
+                                       (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
+               rc = sk_stream_wait_connect(smc->clcsock->sk, &timeo);
+               if ((rc == -EPIPE) &&
+                   ((1 << smc->clcsock->sk->sk_state) &
+                                       (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)))
+                       rc = 0;
+       }
+       release_sock(smc->clcsock->sk);
+       lock_sock(&smc->sk);
+       if (rc != 0 || smc->sk.sk_err) {
+               smc->sk.sk_state = SMC_CLOSED;
+               if (rc == -EPIPE || rc == -EAGAIN)
+                       smc->sk.sk_err = EPIPE;
+               else if (signal_pending(current))
+                       smc->sk.sk_err = -sock_intr_errno(timeo);
                goto out;
        }
 
@@ -775,12 +788,14 @@ static void smc_connect_work(struct work_struct *work)
                smc->sk.sk_err = -rc;
 
 out:
-       if (smc->sk.sk_err)
-               smc->sk.sk_state_change(&smc->sk);
-       else
-               smc->sk.sk_write_space(&smc->sk);
-       kfree(smc->connect_info);
-       smc->connect_info = NULL;
+       if (!sock_flag(&smc->sk, SOCK_DEAD)) {
+               if (smc->sk.sk_err) {
+                       smc->sk.sk_state_change(&smc->sk);
+               } else { /* allow polling before and after fallback decision */
+                       smc->clcsock->sk->sk_write_space(smc->clcsock->sk);
+                       smc->sk.sk_write_space(&smc->sk);
+               }
+       }
        release_sock(&smc->sk);
 }
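
The worker now waits for the TCP three-way handshake by testing (1 << sk_state) against TCPF_* masks. TCPF_FOO is defined as 1 << TCP_FOO, so several states can be checked with a single AND; mixing a plain TCP_* state number into such a mask silently matches the wrong states, which is why the condition above must use TCPF_SYN_RECV. A compilable sketch of the idiom, with the state values hard-coded from include/net/tcp_states.h:

#include <stdio.h>

enum {                  /* numeric values as in include/net/tcp_states.h */
	TCP_ESTABLISHED = 1,
	TCP_SYN_SENT    = 2,
	TCP_SYN_RECV    = 3,
};
#define TCPF(state) (1 << (state))

/* Nonzero while the three-way handshake is still in flight. */
static int handshake_in_flight(int sk_state)
{
	return !!(TCPF(sk_state) & (TCPF(TCP_SYN_SENT) | TCPF(TCP_SYN_RECV)));
}

int main(void)
{
	printf("SYN_SENT: %d, ESTABLISHED: %d\n",
	       handshake_in_flight(TCP_SYN_SENT),      /* 1 -> keep waiting */
	       handshake_in_flight(TCP_ESTABLISHED));  /* 0 -> done */
	return 0;
}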
 
@@ -813,26 +828,18 @@ static int smc_connect(struct socket *sock, struct sockaddr *addr,
 
        smc_copy_sock_settings_to_clc(smc);
        tcp_sk(smc->clcsock->sk)->syn_smc = 1;
+       if (smc->connect_nonblock) {
+               rc = -EALREADY;
+               goto out;
+       }
+       rc = kernel_connect(smc->clcsock, addr, alen, flags);
+       if (rc && rc != -EINPROGRESS)
+               goto out;
        if (flags & O_NONBLOCK) {
-               if (smc->connect_info) {
-                       rc = -EALREADY;
-                       goto out;
-               }
-               smc->connect_info = kzalloc(alen + 2 * sizeof(int), GFP_KERNEL);
-               if (!smc->connect_info) {
-                       rc = -ENOMEM;
-                       goto out;
-               }
-               smc->connect_info->alen = alen;
-               smc->connect_info->flags = flags ^ O_NONBLOCK;
-               memcpy(&smc->connect_info->addr, addr, alen);
-               schedule_work(&smc->connect_work);
+               if (schedule_work(&smc->connect_work))
+                       smc->connect_nonblock = 1;
                rc = -EINPROGRESS;
        } else {
-               rc = kernel_connect(smc->clcsock, addr, alen, flags);
-               if (rc)
-                       goto out;
-
                rc = __smc_connect(smc);
                if (rc < 0)
                        goto out;
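
From user space the reworked nonblocking connect behaves like plain TCP: connect() fails with errno EINPROGRESS, smc_connect_work() finishes the handshake in the background, and the sk_write_space() callbacks above wake any poller. A hypothetical POSIX caller, for illustration only (not part of the patch):

#include <errno.h>
#include <fcntl.h>
#include <netinet/in.h>
#include <poll.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in sa = { .sin_family = AF_INET, .sin_port = htons(7) };
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int err = 0;
	socklen_t len = sizeof(err);

	sa.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	fcntl(fd, F_SETFL, O_NONBLOCK);
	if (connect(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0 &&
	    errno == EINPROGRESS) {
		struct pollfd pfd = { .fd = fd, .events = POLLOUT };

		poll(&pfd, 1, 5000);       /* woken via sk_write_space() */
		getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len);
	}
	printf("connect result: %d\n", err); /* 0 on success, else an errno */
	close(fd);
	return 0;
}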
@@ -872,11 +879,11 @@ static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
        if  (rc < 0)
                lsk->sk_err = -rc;
        if (rc < 0 || lsk->sk_state == SMC_CLOSED) {
+               new_sk->sk_prot->unhash(new_sk);
                if (new_clcsock)
                        sock_release(new_clcsock);
                new_sk->sk_state = SMC_CLOSED;
                sock_set_flag(new_sk, SOCK_DEAD);
-               new_sk->sk_prot->unhash(new_sk);
                sock_put(new_sk); /* final */
                *new_smc = NULL;
                goto out;
@@ -927,16 +934,21 @@ struct sock *smc_accept_dequeue(struct sock *parent,
 
                smc_accept_unlink(new_sk);
                if (new_sk->sk_state == SMC_CLOSED) {
+                       new_sk->sk_prot->unhash(new_sk);
                        if (isk->clcsock) {
                                sock_release(isk->clcsock);
                                isk->clcsock = NULL;
                        }
-                       new_sk->sk_prot->unhash(new_sk);
                        sock_put(new_sk); /* final */
                        continue;
                }
-               if (new_sock)
+               if (new_sock) {
                        sock_graft(new_sk, new_sock);
+                       if (isk->use_fallback) {
+                               smc_sk(new_sk)->clcsock->file = new_sock->file;
+                               isk->clcsock->file->private_data = isk->clcsock;
+                       }
+               }
                return new_sk;
        }
        return NULL;
@@ -956,6 +968,7 @@ void smc_close_non_accepted(struct sock *sk)
                sock_set_flag(sk, SOCK_DEAD);
                sk->sk_shutdown |= SHUTDOWN_MASK;
        }
+       sk->sk_prot->unhash(sk);
        if (smc->clcsock) {
                struct socket *tcp;
 
@@ -971,7 +984,6 @@ void smc_close_non_accepted(struct sock *sk)
                        smc_conn_free(&smc->conn);
        }
        release_sock(sk);
-       sk->sk_prot->unhash(sk);
        sock_put(sk); /* final sock_put */
 }
 
@@ -1037,13 +1049,13 @@ static void smc_listen_out(struct smc_sock *new_smc)
        struct smc_sock *lsmc = new_smc->listen_smc;
        struct sock *newsmcsk = &new_smc->sk;
 
-       lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING);
        if (lsmc->sk.sk_state == SMC_LISTEN) {
+               lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING);
                smc_accept_enqueue(&lsmc->sk, newsmcsk);
+               release_sock(&lsmc->sk);
        } else { /* no longer listening */
                smc_close_non_accepted(newsmcsk);
        }
-       release_sock(&lsmc->sk);
 
        /* Wake up accept */
        lsmc->sk.sk_data_ready(&lsmc->sk);
@@ -1087,7 +1099,7 @@ static void smc_listen_decline(struct smc_sock *new_smc, int reason_code,
                return;
        }
        smc_conn_free(&new_smc->conn);
-       new_smc->use_fallback = true;
+       smc_switch_to_fallback(new_smc);
        new_smc->fallback_rsn = reason_code;
        if (reason_code && reason_code != SMC_CLC_DECL_PEERDECL) {
                if (smc_clc_send_decline(new_smc, reason_code) < 0) {
@@ -1099,7 +1111,7 @@ static void smc_listen_decline(struct smc_sock *new_smc, int reason_code,
 }
 
 /* listen worker: check prefixes */
-static int smc_listen_rdma_check(struct smc_sock *new_smc,
+static int smc_listen_prfx_check(struct smc_sock *new_smc,
                                 struct smc_clc_msg_proposal *pclc)
 {
        struct smc_clc_msg_proposal_prefix *pclc_prfx;
@@ -1107,25 +1119,21 @@ static int smc_listen_rdma_check(struct smc_sock *new_smc,
 
        pclc_prfx = smc_clc_proposal_get_prefix(pclc);
        if (smc_clc_prfx_match(newclcsock, pclc_prfx))
-               return SMC_CLC_DECL_CNFERR;
+               return SMC_CLC_DECL_DIFFPREFIX;
 
        return 0;
 }
 
 /* listen worker: initialize connection and buffers */
 static int smc_listen_rdma_init(struct smc_sock *new_smc,
-                               struct smc_clc_msg_proposal *pclc,
-                               struct smc_ib_device *ibdev, u8 ibport,
-                               int *local_contact)
+                               struct smc_init_info *ini)
 {
+       int rc;
+
        /* allocate connection / link group */
-       *local_contact = smc_conn_create(new_smc, false, 0, ibdev, ibport, 0,
-                                        &pclc->lcl, NULL, 0);
-       if (*local_contact < 0) {
-               if (*local_contact == -ENOMEM)
-                       return SMC_CLC_DECL_MEM;/* insufficient memory*/
-               return SMC_CLC_DECL_INTERR; /* other error */
-       }
+       rc = smc_conn_create(new_smc, ini);
+       if (rc)
+               return rc;
 
        /* create send buffer and rmb */
        if (smc_buf_create(new_smc, false))
@@ -1137,33 +1145,30 @@ static int smc_listen_rdma_init(struct smc_sock *new_smc,
 /* listen worker: initialize connection and buffers for SMC-D */
 static int smc_listen_ism_init(struct smc_sock *new_smc,
                               struct smc_clc_msg_proposal *pclc,
-                              struct smcd_dev *ismdev,
-                              int *local_contact)
+                              struct smc_init_info *ini)
 {
        struct smc_clc_msg_smcd *pclc_smcd;
+       int rc;
 
        pclc_smcd = smc_get_clc_msg_smcd(pclc);
-       *local_contact = smc_conn_create(new_smc, true, 0, NULL, 0, 0, NULL,
-                                        ismdev, pclc_smcd->gid);
-       if (*local_contact < 0) {
-               if (*local_contact == -ENOMEM)
-                       return SMC_CLC_DECL_MEM;/* insufficient memory*/
-               return SMC_CLC_DECL_INTERR; /* other error */
-       }
+       ini->ism_gid = pclc_smcd->gid;
+       rc = smc_conn_create(new_smc, ini);
+       if (rc)
+               return rc;
 
        /* Check if peer can be reached via ISM device */
        if (smc_ism_cantalk(new_smc->conn.lgr->peer_gid,
                            new_smc->conn.lgr->vlan_id,
                            new_smc->conn.lgr->smcd)) {
-               if (*local_contact == SMC_FIRST_CONTACT)
+               if (ini->cln_first_contact == SMC_FIRST_CONTACT)
                        smc_lgr_forget(new_smc->conn.lgr);
                smc_conn_free(&new_smc->conn);
-               return SMC_CLC_DECL_CNFERR;
+               return SMC_CLC_DECL_SMCDNOTALK;
        }
 
        /* Create send and receive buffers */
        if (smc_buf_create(new_smc, true)) {
-               if (*local_contact == SMC_FIRST_CONTACT)
+               if (ini->cln_first_contact == SMC_FIRST_CONTACT)
                        smc_lgr_forget(new_smc->conn.lgr);
                smc_conn_free(&new_smc->conn);
                return SMC_CLC_DECL_MEM;
@@ -1227,15 +1232,13 @@ static void smc_listen_work(struct work_struct *work)
        struct socket *newclcsock = new_smc->clcsock;
        struct smc_clc_msg_accept_confirm cclc;
        struct smc_clc_msg_proposal *pclc;
-       struct smc_ib_device *ibdev;
+       struct smc_init_info ini = {0};
        bool ism_supported = false;
-       struct smcd_dev *ismdev;
        u8 buf[SMC_CLC_MAX_LEN];
-       int local_contact = 0;
-       unsigned short vlan;
-       int reason_code = 0;
        int rc = 0;
-       u8 ibport;
+
+       if (new_smc->listen_smc->sk.sk_state != SMC_LISTEN)
+               return smc_listen_out_err(new_smc);
 
        if (new_smc->use_fallback) {
                smc_listen_out_connected(new_smc);
@@ -1244,7 +1247,7 @@ static void smc_listen_work(struct work_struct *work)
 
        /* check if peer is smc capable */
        if (!tcp_sk(newclcsock->sk)->syn_smc) {
-               new_smc->use_fallback = true;
+               smc_switch_to_fallback(new_smc);
                new_smc->fallback_rsn = SMC_CLC_DECL_PEERNOSMC;
                smc_listen_out_connected(new_smc);
                return;
@@ -1254,17 +1257,26 @@ static void smc_listen_work(struct work_struct *work)
         * wait for and receive SMC Proposal CLC message
         */
        pclc = (struct smc_clc_msg_proposal *)&buf;
-       reason_code = smc_clc_wait_msg(new_smc, pclc, SMC_CLC_MAX_LEN,
-                                      SMC_CLC_PROPOSAL, CLC_WAIT_TIME);
-       if (reason_code) {
-               smc_listen_decline(new_smc, reason_code, 0);
-               return;
-       }
+       rc = smc_clc_wait_msg(new_smc, pclc, SMC_CLC_MAX_LEN,
+                             SMC_CLC_PROPOSAL, CLC_WAIT_TIME);
+       if (rc)
+               goto out_decl;
 
        /* IPSec connections opt out of SMC-R optimizations */
        if (using_ipsec(new_smc)) {
-               smc_listen_decline(new_smc, SMC_CLC_DECL_IPSEC, 0);
-               return;
+               rc = SMC_CLC_DECL_IPSEC;
+               goto out_decl;
+       }
+
+       /* check for matching IP prefix and subnet length */
+       rc = smc_listen_prfx_check(new_smc, pclc);
+       if (rc)
+               goto out_decl;
+
+       /* get vlan id from IP device */
+       if (smc_vlan_by_tcpsk(new_smc->clcsock, &ini)) {
+               rc = SMC_CLC_DECL_GETVLANERR;
+               goto out_decl;
        }
 
        mutex_lock(&smc_server_lgr_pending);
@@ -1273,59 +1285,73 @@ static void smc_listen_work(struct work_struct *work)
        smc_tx_init(new_smc);
 
        /* check if ISM is available */
-       if ((pclc->hdr.path == SMC_TYPE_D || pclc->hdr.path == SMC_TYPE_B) &&
-           !smc_check_ism(new_smc, &ismdev) &&
-           !smc_listen_ism_init(new_smc, pclc, ismdev, &local_contact)) {
-               ism_supported = true;
+       if (pclc->hdr.path == SMC_TYPE_D || pclc->hdr.path == SMC_TYPE_B) {
+               ini.is_smcd = true; /* prepare ISM check */
+               rc = smc_find_ism_device(new_smc, &ini);
+               if (!rc)
+                       rc = smc_listen_ism_init(new_smc, pclc, &ini);
+               if (!rc)
+                       ism_supported = true;
+               else if (pclc->hdr.path == SMC_TYPE_D)
+                       goto out_unlock; /* skip RDMA and decline */
        }
 
        /* check if RDMA is available */
-       if (!ism_supported &&
-           ((pclc->hdr.path != SMC_TYPE_R && pclc->hdr.path != SMC_TYPE_B) ||
-            smc_vlan_by_tcpsk(new_smc->clcsock, &vlan) ||
-            smc_check_rdma(new_smc, &ibdev, &ibport, vlan, NULL) ||
-            smc_listen_rdma_check(new_smc, pclc) ||
-            smc_listen_rdma_init(new_smc, pclc, ibdev, ibport,
-                                 &local_contact) ||
-            smc_listen_rdma_reg(new_smc, local_contact))) {
-               /* SMC not supported, decline */
-               mutex_unlock(&smc_server_lgr_pending);
-               smc_listen_decline(new_smc, SMC_CLC_DECL_MODEUNSUPP,
-                                  local_contact);
-               return;
+       if (!ism_supported) { /* SMC_TYPE_R or SMC_TYPE_B */
+               /* prepare RDMA check */
+               memset(&ini, 0, sizeof(ini));
+               ini.is_smcd = false;
+               ini.ib_lcl = &pclc->lcl;
+               rc = smc_find_rdma_device(new_smc, &ini);
+               if (rc) {
+                       /* no RDMA device found */
+                       if (pclc->hdr.path == SMC_TYPE_B)
+                               /* neither ISM nor RDMA device found */
+                               rc = SMC_CLC_DECL_NOSMCDEV;
+                       goto out_unlock;
+               }
+               rc = smc_listen_rdma_init(new_smc, &ini);
+               if (rc)
+                       goto out_unlock;
+               rc = smc_listen_rdma_reg(new_smc, ini.cln_first_contact);
+               if (rc)
+                       goto out_unlock;
        }
 
        /* send SMC Accept CLC message */
-       rc = smc_clc_send_accept(new_smc, local_contact);
-       if (rc) {
-               mutex_unlock(&smc_server_lgr_pending);
-               smc_listen_decline(new_smc, rc, local_contact);
-               return;
-       }
+       rc = smc_clc_send_accept(new_smc, ini.cln_first_contact);
+       if (rc)
+               goto out_unlock;
 
        /* SMC-D does not need this lock any more */
        if (ism_supported)
                mutex_unlock(&smc_server_lgr_pending);
 
        /* receive SMC Confirm CLC message */
-       reason_code = smc_clc_wait_msg(new_smc, &cclc, sizeof(cclc),
-                                      SMC_CLC_CONFIRM, CLC_WAIT_TIME);
-       if (reason_code) {
+       rc = smc_clc_wait_msg(new_smc, &cclc, sizeof(cclc),
+                             SMC_CLC_CONFIRM, CLC_WAIT_TIME);
+       if (rc) {
                if (!ism_supported)
-                       mutex_unlock(&smc_server_lgr_pending);
-               smc_listen_decline(new_smc, reason_code, local_contact);
-               return;
+                       goto out_unlock;
+               goto out_decl;
        }
 
        /* finish worker */
        if (!ism_supported) {
-               rc = smc_listen_rdma_finish(new_smc, &cclc, local_contact);
+               rc = smc_listen_rdma_finish(new_smc, &cclc,
+                                           ini.cln_first_contact);
                mutex_unlock(&smc_server_lgr_pending);
                if (rc)
                        return;
        }
        smc_conn_save_peer_info(new_smc, &cclc);
        smc_listen_out_connected(new_smc);
+       return;
+
+out_unlock:
+       mutex_unlock(&smc_server_lgr_pending);
+out_decl:
+       smc_listen_decline(new_smc, rc, ini.cln_first_contact);
 }
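
The rewritten smc_listen_work() funnels every failure through the out_unlock/out_decl labels instead of repeating unlock-decline-return at each check. A minimal compilable sketch of that unwind-ladder shape, with a pthread mutex standing in for smc_server_lgr_pending:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pending = PTHREAD_MUTEX_INITIALIZER;

static int listen_step(int fail_early, int fail_locked)
{
	int rc = 0;

	if (fail_early) {
		rc = -1;
		goto out_decl;          /* nothing locked yet */
	}
	pthread_mutex_lock(&pending);
	if (fail_locked) {
		rc = -2;
		goto out_unlock;        /* must drop the lock first */
	}
	pthread_mutex_unlock(&pending);
	return 0;

out_unlock:
	pthread_mutex_unlock(&pending);
out_decl:
	printf("declined, rc=%d\n", rc);
	return rc;
}

int main(void)
{
	listen_step(1, 0);
	listen_step(0, 1);
	return 0;
}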
 
 static void smc_tcp_listen_work(struct work_struct *work)
@@ -1501,7 +1527,7 @@ static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
 
        if (msg->msg_flags & MSG_FASTOPEN) {
                if (sk->sk_state == SMC_INIT) {
-                       smc->use_fallback = true;
+                       smc_switch_to_fallback(smc);
                        smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
                } else {
                        rc = -EINVAL;
@@ -1571,8 +1597,8 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
                             poll_table *wait)
 {
        struct sock *sk = sock->sk;
-       __poll_t mask = 0;
        struct smc_sock *smc;
+       __poll_t mask = 0;
 
        if (!sk)
                return EPOLLNVAL;
@@ -1582,8 +1608,6 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
                /* delegate to CLC child sock */
                mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
                sk->sk_err = smc->clcsock->sk->sk_err;
-               if (sk->sk_err)
-                       mask |= EPOLLERR;
        } else {
                if (sk->sk_state != SMC_CLOSED)
                        sock_poll_wait(file, sock, wait);
@@ -1594,9 +1618,14 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
                        mask |= EPOLLHUP;
                if (sk->sk_state == SMC_LISTEN) {
                        /* woken up by sk_data_ready in smc_listen_work() */
-                       mask = smc_accept_poll(sk);
+                       mask |= smc_accept_poll(sk);
+               } else if (smc->use_fallback) { /* as result of connect_work()*/
+               mask |= smc->clcsock->ops->poll(file, smc->clcsock,
+                                               wait);
+                       sk->sk_err = smc->clcsock->sk->sk_err;
                } else {
-                       if (atomic_read(&smc->conn.sndbuf_space) ||
+                       if ((sk->sk_state != SMC_INIT &&
+                            atomic_read(&smc->conn.sndbuf_space)) ||
                            sk->sk_shutdown & SEND_SHUTDOWN) {
                                mask |= EPOLLOUT | EPOLLWRNORM;
                        } else {
@@ -1703,7 +1732,7 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
        case TCP_FASTOPEN_NO_COOKIE:
                /* option not supported by SMC */
                if (sk->sk_state == SMC_INIT) {
-                       smc->use_fallback = true;
+                       smc_switch_to_fallback(smc);
                        smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
                } else {
                        if (!smc->use_fallback)
index adbdf195eb085f5507444340fb1f8ddcb69548d5..878313f8d6c1772849d0877f50e4190d9fe2170b 100644 (file)
@@ -190,18 +190,11 @@ struct smc_connection {
        u64                     peer_token;     /* SMC-D token of peer */
 };
 
-struct smc_connect_info {
-       int                     flags;
-       int                     alen;
-       struct sockaddr         addr;
-};
-
 struct smc_sock {                              /* smc sock container */
        struct sock             sk;
        struct socket           *clcsock;       /* internal tcp socket */
        struct smc_connection   conn;           /* smc connection */
        struct smc_sock         *listen_smc;    /* listen parent */
-       struct smc_connect_info *connect_info;  /* connect address & flags */
        struct work_struct      connect_work;   /* handle non-blocking connect*/
        struct work_struct      tcp_listen_work;/* handle tcp socket accepts */
        struct work_struct      smc_listen_work;/* prepare new accept socket */
@@ -219,6 +212,10 @@ struct smc_sock {                          /* smc sock container */
                                                 * started, waiting for unsent
                                                 * data to be sent
                                                 */
+       u8                      connect_nonblock : 1;
+                                               /* non-blocking connect in
+                                                * flight
+                                                */
        struct mutex            clcsock_release_lock;
                                                /* protects clcsock of a listen
                                                 * socket
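
The heap-allocated smc_connect_info is gone: the worker now picks everything up from the kernel_connect() already issued on the clcsock, so the only state left to remember is that a nonblocking connect is in flight. A sketch of the one-bit flag (stand-in struct; field names mirrored from the hunk):

#include <stdio.h>

struct sock_state {
	unsigned char use_fallback : 1;
	unsigned char connect_nonblock : 1; /* nonblocking connect in flight */
};

int main(void)
{
	struct sock_state s = {0};

	s.connect_nonblock = 1;       /* set when schedule_work() succeeds */
	printf("in flight: %u, size: %zu byte(s)\n",
	       (unsigned)s.connect_nonblock, sizeof(s));
	s.connect_nonblock = 0;       /* cleared once the handshake finishes */
	return 0;
}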
index d53fd588d1f5a4450819934c83cf960dc29ff46b..745afd82f281186f2ca1b580081714893507595f 100644 (file)
@@ -385,8 +385,7 @@ int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info)
 
 /* send CLC PROPOSAL message across internal TCP socket */
 int smc_clc_send_proposal(struct smc_sock *smc, int smc_type,
-                         struct smc_ib_device *ibdev, u8 ibport, u8 gid[],
-                         struct smcd_dev *ismdev)
+                         struct smc_init_info *ini)
 {
        struct smc_clc_ipv6_prefix ipv6_prfx[SMC_CLC_MAX_V6_PREFIX];
        struct smc_clc_msg_proposal_prefix pclc_prfx;
@@ -416,8 +415,9 @@ int smc_clc_send_proposal(struct smc_sock *smc, int smc_type,
                /* add SMC-R specifics */
                memcpy(pclc.lcl.id_for_peer, local_systemid,
                       sizeof(local_systemid));
-               memcpy(&pclc.lcl.gid, gid, SMC_GID_SIZE);
-               memcpy(&pclc.lcl.mac, &ibdev->mac[ibport - 1], ETH_ALEN);
+               memcpy(&pclc.lcl.gid, ini->ib_gid, SMC_GID_SIZE);
+               memcpy(&pclc.lcl.mac, &ini->ib_dev->mac[ini->ib_port - 1],
+                      ETH_ALEN);
                pclc.iparea_offset = htons(0);
        }
        if (smc_type == SMC_TYPE_D || smc_type == SMC_TYPE_B) {
@@ -425,7 +425,7 @@ int smc_clc_send_proposal(struct smc_sock *smc, int smc_type,
                memset(&pclc_smcd, 0, sizeof(pclc_smcd));
                plen += sizeof(pclc_smcd);
                pclc.iparea_offset = htons(SMC_CLC_PROPOSAL_MAX_OFFSET);
-               pclc_smcd.gid = ismdev->local_gid;
+               pclc_smcd.gid = ini->ism_dev->local_gid;
        }
        pclc.hdr.length = htons(plen);
 
index 24658e8c0de42bfa182cd7379bb79850eb3936a5..ca209272e5fa698df643807174b1ce3501ed6215 100644 (file)
 #define SMC_CLC_DECL_CNFERR    0x03000000  /* configuration error            */
 #define SMC_CLC_DECL_PEERNOSMC 0x03010000  /* peer did not indicate SMC      */
 #define SMC_CLC_DECL_IPSEC     0x03020000  /* IPsec usage                    */
-#define SMC_CLC_DECL_NOSMCDEV  0x03030000  /* no SMC device found            */
+#define SMC_CLC_DECL_NOSMCDEV  0x03030000  /* no SMC device found (R or D)   */
+#define SMC_CLC_DECL_NOSMCDDEV 0x03030001  /* no SMC-D device found          */
+#define SMC_CLC_DECL_NOSMCRDEV 0x03030002  /* no SMC-R device found          */
+#define SMC_CLC_DECL_SMCDNOTALK        0x03030003  /* SMC-D dev can't talk to peer   */
 #define SMC_CLC_DECL_MODEUNSUPP        0x03040000  /* smc modes do not match (R or D)*/
 #define SMC_CLC_DECL_RMBE_EC   0x03050000  /* peer has eyecatcher in RMBE    */
 #define SMC_CLC_DECL_OPTUNSUPP 0x03060000  /* fastopen sockopt not supported */
+#define SMC_CLC_DECL_DIFFPREFIX        0x03070000  /* IP prefix / subnet mismatch    */
+#define SMC_CLC_DECL_GETVLANERR        0x03080000  /* err to get vlan id of ip device*/
+#define SMC_CLC_DECL_ISMVLANERR        0x03090000  /* err to reg vlan id on ism dev  */
 #define SMC_CLC_DECL_SYNCERR   0x04000000  /* synchronization error          */
 #define SMC_CLC_DECL_PEERDECL  0x05000000  /* peer declined during handshake */
-#define SMC_CLC_DECL_INTERR    0x99990000  /* internal error                 */
-#define SMC_CLC_DECL_ERR_RTOK  0x99990001  /*   rtoken handling failed       */
-#define SMC_CLC_DECL_ERR_RDYLNK        0x99990002  /*   ib ready link failed         */
-#define SMC_CLC_DECL_ERR_REGRMB        0x99990003  /*   reg rmb failed               */
+#define SMC_CLC_DECL_INTERR    0x09990000  /* internal error                 */
+#define SMC_CLC_DECL_ERR_RTOK  0x09990001  /*   rtoken handling failed       */
+#define SMC_CLC_DECL_ERR_RDYLNK        0x09990002  /*   ib ready link failed         */
+#define SMC_CLC_DECL_ERR_REGRMB        0x09990003  /*   reg rmb failed               */
 
 struct smc_clc_msg_hdr {       /* header1 of clc messages */
        u8 eyecatcher[4];       /* eye catcher */
@@ -179,6 +185,7 @@ smc_get_clc_msg_smcd(struct smc_clc_msg_proposal *prop)
 }
 
 struct smcd_dev;
+struct smc_init_info;
 
 int smc_clc_prfx_match(struct socket *clcsock,
                       struct smc_clc_msg_proposal_prefix *prop);
@@ -186,8 +193,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
                     u8 expected_type, unsigned long timeout);
 int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info);
 int smc_clc_send_proposal(struct smc_sock *smc, int smc_type,
-                         struct smc_ib_device *smcibdev, u8 ibport, u8 gid[],
-                         struct smcd_dev *ismdev);
+                         struct smc_init_info *ini);
 int smc_clc_send_confirm(struct smc_sock *smc);
 int smc_clc_send_accept(struct smc_sock *smc, int srv_first_contact);
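
One consistent reading of the diagnosis constants above: the top byte groups the class (0x03 configuration, 0x04 synchronization, 0x05 peer decline, 0x09 internal, moved down from the old 0x99 prefix), while the low bytes select a subcase such as the new 0x0303000x device variants. The helper macros below are illustrative only, not part of the SMC headers:

#include <stdio.h>

#define DECL_CLASS(code)   (((code) >> 24) & 0xff)  /* hypothetical helpers */
#define DECL_SUBCODE(code) ((code) & 0x00ffffff)

int main(void)
{
	unsigned int code = 0x03030003;     /* SMC_CLC_DECL_SMCDNOTALK */

	printf("class 0x%02x, subcode 0x%06x\n",
	       DECL_CLASS(code), DECL_SUBCODE(code));
	return 0;
}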
 
index 2ad37e998509310f210f4e3654cc054487731e87..fc06720b53c1442a8dd3222ed7be482a8993ab92 100644 (file)
 
 #define SMC_CLOSE_WAIT_LISTEN_CLCSOCK_TIME     (5 * HZ)
 
+/* release the clcsock that is assigned to the smc_sock */
+void smc_clcsock_release(struct smc_sock *smc)
+{
+       struct socket *tcp;
+
+       if (smc->listen_smc && current_work() != &smc->smc_listen_work)
+               cancel_work_sync(&smc->smc_listen_work);
+       mutex_lock(&smc->clcsock_release_lock);
+       if (smc->clcsock) {
+               tcp = smc->clcsock;
+               smc->clcsock = NULL;
+               sock_release(tcp);
+       }
+       mutex_unlock(&smc->clcsock_release_lock);
+}
+
 static void smc_close_cleanup_listen(struct sock *parent)
 {
        struct sock *sk;
@@ -321,6 +337,7 @@ static void smc_close_passive_work(struct work_struct *work)
                                                   close_work);
        struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
        struct smc_cdc_conn_state_flags *rxflags;
+       bool release_clcsock = false;
        struct sock *sk = &smc->sk;
        int old_state;
 
@@ -400,13 +417,13 @@ static void smc_close_passive_work(struct work_struct *work)
                if ((sk->sk_state == SMC_CLOSED) &&
                    (sock_flag(sk, SOCK_DEAD) || !sk->sk_socket)) {
                        smc_conn_free(conn);
-                       if (smc->clcsock) {
-                               sock_release(smc->clcsock);
-                               smc->clcsock = NULL;
-                       }
+                       if (smc->clcsock)
+                               release_clcsock = true;
                }
        }
        release_sock(sk);
+       if (release_clcsock)
+               smc_clcsock_release(smc);
        sock_put(sk); /* sock_hold done by schedulers of close_work */
 }
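
The hunk above decides under the sock lock that the clcsock must go (release_clcsock = true) but performs the potentially sleeping smc_clcsock_release() only after release_sock(). A user-space analogue of that decide-under-lock, act-after-unlock pattern:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static char *resource;

static void teardown(void)
{
	int do_release = 0;
	char *victim = NULL;

	pthread_mutex_lock(&lock);
	if (resource) {              /* decide while protected */
		victim = resource;
		resource = NULL;
		do_release = 1;
	}
	pthread_mutex_unlock(&lock);

	if (do_release)              /* blocking work, lock already dropped */
		free(victim);
}

int main(void)
{
	resource = malloc(16);
	teardown();
	printf("released: %s\n", resource ? "no" : "yes");
	return 0;
}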
 
index 19eb6a211c23cd12fad8f5077a26209bb05c3d33..e0e3b5df25d2474b8aadd2e7639d07e0c8c631ef 100644 (file)
@@ -23,5 +23,6 @@ void smc_close_wake_tx_prepared(struct smc_sock *smc);
 int smc_close_active(struct smc_sock *smc);
 int smc_close_shutdown_write(struct smc_sock *smc);
 void smc_close_init(struct smc_sock *smc);
+void smc_clcsock_release(struct smc_sock *smc);
 
 #endif /* SMC_CLOSE_H */
index 53a17cfa61af8d96e62fe11e9508584d71596061..2d2850adc2a3fa7a6a8667d79add5678beb130ca 100644 (file)
@@ -195,10 +195,7 @@ static void smc_lgr_free_work(struct work_struct *work)
 }
 
 /* create a new SMC link group */
-static int smc_lgr_create(struct smc_sock *smc, bool is_smcd,
-                         struct smc_ib_device *smcibdev, u8 ibport,
-                         char *peer_systemid, unsigned short vlan_id,
-                         struct smcd_dev *smcismdev, u64 peer_gid)
+static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
 {
        struct smc_link_group *lgr;
        struct smc_link *lnk;
@@ -206,20 +203,21 @@ static int smc_lgr_create(struct smc_sock *smc, bool is_smcd,
        int rc = 0;
        int i;
 
-       if (is_smcd && vlan_id) {
-               rc = smc_ism_get_vlan(smcismdev, vlan_id);
-               if (rc)
+       if (ini->is_smcd && ini->vlan_id) {
+               if (smc_ism_get_vlan(ini->ism_dev, ini->vlan_id)) {
+                       rc = SMC_CLC_DECL_ISMVLANERR;
                        goto out;
+               }
        }
 
        lgr = kzalloc(sizeof(*lgr), GFP_KERNEL);
        if (!lgr) {
-               rc = -ENOMEM;
+               rc = SMC_CLC_DECL_MEM;
                goto out;
        }
-       lgr->is_smcd = is_smcd;
+       lgr->is_smcd = ini->is_smcd;
        lgr->sync_err = 0;
-       lgr->vlan_id = vlan_id;
+       lgr->vlan_id = ini->vlan_id;
        rwlock_init(&lgr->sndbufs_lock);
        rwlock_init(&lgr->rmbs_lock);
        rwlock_init(&lgr->conns_lock);
@@ -231,29 +229,32 @@ static int smc_lgr_create(struct smc_sock *smc, bool is_smcd,
        memcpy(&lgr->id, (u8 *)&smc_lgr_list.num, SMC_LGR_ID_SIZE);
        INIT_DELAYED_WORK(&lgr->free_work, smc_lgr_free_work);
        lgr->conns_all = RB_ROOT;
-       if (is_smcd) {
+       if (ini->is_smcd) {
                /* SMC-D specific settings */
-               lgr->peer_gid = peer_gid;
-               lgr->smcd = smcismdev;
+               lgr->peer_gid = ini->ism_gid;
+               lgr->smcd = ini->ism_dev;
        } else {
                /* SMC-R specific settings */
                lgr->role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
-               memcpy(lgr->peer_systemid, peer_systemid, SMC_SYSTEMID_LEN);
+               memcpy(lgr->peer_systemid, ini->ib_lcl->id_for_peer,
+                      SMC_SYSTEMID_LEN);
 
                lnk = &lgr->lnk[SMC_SINGLE_LINK];
                /* initialize link */
                lnk->state = SMC_LNK_ACTIVATING;
                lnk->link_id = SMC_SINGLE_LINK;
-               lnk->smcibdev = smcibdev;
-               lnk->ibport = ibport;
-               lnk->path_mtu = smcibdev->pattr[ibport - 1].active_mtu;
-               if (!smcibdev->initialized)
-                       smc_ib_setup_per_ibdev(smcibdev);
+               lnk->smcibdev = ini->ib_dev;
+               lnk->ibport = ini->ib_port;
+               lnk->path_mtu =
+                       ini->ib_dev->pattr[ini->ib_port - 1].active_mtu;
+               if (!ini->ib_dev->initialized)
+                       smc_ib_setup_per_ibdev(ini->ib_dev);
                get_random_bytes(rndvec, sizeof(rndvec));
                lnk->psn_initial = rndvec[0] + (rndvec[1] << 8) +
                        (rndvec[2] << 16);
                rc = smc_ib_determine_gid(lnk->smcibdev, lnk->ibport,
-                                         vlan_id, lnk->gid, &lnk->sgid_index);
+                                         ini->vlan_id, lnk->gid,
+                                         &lnk->sgid_index);
                if (rc)
                        goto free_lgr;
                rc = smc_llc_link_init(lnk);
@@ -289,6 +290,12 @@ static int smc_lgr_create(struct smc_sock *smc, bool is_smcd,
 free_lgr:
        kfree(lgr);
 out:
+       if (rc < 0) {
+               if (rc == -ENOMEM)
+                       rc = SMC_CLC_DECL_MEM;
+               else
+                       rc = SMC_CLC_DECL_INTERR;
+       }
        return rc;
 }
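
smc_lgr_create() now returns CLC decline codes rather than raw errnos, and the tail above normalizes anything still negative so callers can hand rc straight to the decline path. A tiny sketch of that mapping; both code values here are stand-ins:

#include <errno.h>
#include <stdio.h>

#define DECL_MEM    0x01010000   /* stand-in decline codes */
#define DECL_INTERR 0x09990000

static int map_rc(int rc)
{
	if (rc >= 0)
		return rc;           /* success or already a decline code */
	return rc == -ENOMEM ? DECL_MEM : DECL_INTERR;
}

int main(void)
{
	printf("0x%08x 0x%08x\n", map_rc(-ENOMEM), map_rc(-EIO));
	return 0;
}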
 
@@ -528,13 +535,13 @@ void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid, unsigned short vlan)
 /* Determine vlan of internal TCP socket.
  * @ini: the determined vlan id is stored into ini->vlan_id
  */
-int smc_vlan_by_tcpsk(struct socket *clcsock, unsigned short *vlan_id)
+int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini)
 {
        struct dst_entry *dst = sk_dst_get(clcsock->sk);
        struct net_device *ndev;
        int i, nest_lvl, rc = 0;
 
-       *vlan_id = 0;
+       ini->vlan_id = 0;
        if (!dst) {
                rc = -ENOTCONN;
                goto out;
@@ -546,7 +553,7 @@ int smc_vlan_by_tcpsk(struct socket *clcsock, unsigned short *vlan_id)
 
        ndev = dst->dev;
        if (is_vlan_dev(ndev)) {
-               *vlan_id = vlan_dev_vlan_id(ndev);
+               ini->vlan_id = vlan_dev_vlan_id(ndev);
                goto out_rel;
        }
 
@@ -560,7 +567,7 @@ int smc_vlan_by_tcpsk(struct socket *clcsock, unsigned short *vlan_id)
                lower = lower->next;
                ndev = (struct net_device *)netdev_lower_get_next(ndev, &lower);
                if (is_vlan_dev(ndev)) {
-                       *vlan_id = vlan_dev_vlan_id(ndev);
+                       ini->vlan_id = vlan_dev_vlan_id(ndev);
                        break;
                }
        }
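
smc_vlan_by_tcpsk() resolves the VLAN by starting at the routed device and following lower devices until a VLAN device appears, bounded by the device nesting level. A user-space sketch of that walk with stand-in types:

#include <stdio.h>

struct netdev { int is_vlan; int vlan_id; struct netdev *lower; };

static int vlan_by_dev(struct netdev *ndev, unsigned short *vlan_id)
{
	int nest_lvl = 3;             /* stands in for the nesting bound */

	*vlan_id = 0;
	for (; ndev && nest_lvl-- > 0; ndev = ndev->lower) {
		if (ndev->is_vlan) {
			*vlan_id = ndev->vlan_id;
			return 0;
		}
	}
	return 0;                     /* no VLAN device: id stays 0 */
}

int main(void)
{
	struct netdev vlan = { .is_vlan = 1, .vlan_id = 100 };
	struct netdev bond = { .lower = &vlan };
	unsigned short id;

	vlan_by_dev(&bond, &id);
	printf("vlan id: %u\n", id);
	return 0;
}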
@@ -594,24 +601,16 @@ static bool smcd_lgr_match(struct smc_link_group *lgr,
 }
 
 /* create a new SMC connection (and a new link group if necessary) */
-int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact,
-                   struct smc_ib_device *smcibdev, u8 ibport, u32 clcqpn,
-                   struct smc_clc_msg_local *lcl, struct smcd_dev *smcd,
-                   u64 peer_gid)
+int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini)
 {
        struct smc_connection *conn = &smc->conn;
-       int local_contact = SMC_FIRST_CONTACT;
        struct smc_link_group *lgr;
-       unsigned short vlan_id;
        enum smc_lgr_role role;
        int rc = 0;
 
+       ini->cln_first_contact = SMC_FIRST_CONTACT;
        role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
-       rc = smc_vlan_by_tcpsk(smc->clcsock, &vlan_id);
-       if (rc)
-               return rc;
-
-       if ((role == SMC_CLNT) && srv_first_contact)
+       if (role == SMC_CLNT && ini->srv_first_contact)
                /* create new link group as well */
                goto create;
 
@@ -619,14 +618,15 @@ int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact,
        spin_lock_bh(&smc_lgr_list.lock);
        list_for_each_entry(lgr, &smc_lgr_list.list, list) {
                write_lock_bh(&lgr->conns_lock);
-               if ((is_smcd ? smcd_lgr_match(lgr, smcd, peer_gid) :
-                    smcr_lgr_match(lgr, lcl, role, clcqpn)) &&
+               if ((ini->is_smcd ?
+                    smcd_lgr_match(lgr, ini->ism_dev, ini->ism_gid) :
+                    smcr_lgr_match(lgr, ini->ib_lcl, role, ini->ib_clcqpn)) &&
                    !lgr->sync_err &&
-                   lgr->vlan_id == vlan_id &&
+                   lgr->vlan_id == ini->vlan_id &&
                    (role == SMC_CLNT ||
                     lgr->conns_num < SMC_RMBS_PER_LGR_MAX)) {
                        /* link group found */
-                       local_contact = SMC_REUSE_CONTACT;
+                       ini->cln_first_contact = SMC_REUSE_CONTACT;
                        conn->lgr = lgr;
                        smc_lgr_register_conn(conn); /* add smc conn to lgr */
                        if (delayed_work_pending(&lgr->free_work))
@@ -638,19 +638,18 @@ int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact,
        }
        spin_unlock_bh(&smc_lgr_list.lock);
 
-       if (role == SMC_CLNT && !srv_first_contact &&
-           (local_contact == SMC_FIRST_CONTACT)) {
+       if (role == SMC_CLNT && !ini->srv_first_contact &&
+           ini->cln_first_contact == SMC_FIRST_CONTACT) {
                /* Server reuses a link group, but Client wants to start
                 * a new one
                 * send out_of_sync decline, reason synchr. error
                 */
-               return -ENOLINK;
+               return SMC_CLC_DECL_SYNCERR;
        }
 
 create:
-       if (local_contact == SMC_FIRST_CONTACT) {
-               rc = smc_lgr_create(smc, is_smcd, smcibdev, ibport,
-                                   lcl->id_for_peer, vlan_id, smcd, peer_gid);
+       if (ini->cln_first_contact == SMC_FIRST_CONTACT) {
+               rc = smc_lgr_create(smc, ini);
                if (rc)
                        goto out;
                smc_lgr_register_conn(conn); /* add smc conn to lgr */
@@ -658,7 +657,7 @@ int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact,
        conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE;
        conn->local_tx_ctrl.len = SMC_WR_TX_SIZE;
        conn->urg_state = SMC_URG_READ;
-       if (is_smcd) {
+       if (ini->is_smcd) {
                conn->rx_off = sizeof(struct smcd_cdc_msg);
                smcd_cdc_rx_init(conn); /* init tasklet for this conn */
        }
@@ -667,7 +666,7 @@ int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact,
 #endif
 
 out:
-       return rc ? rc : local_contact;
+       return rc;
 }
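
smc_conn_create() scans smc_lgr_list under its lock for a reusable link group and only creates one on first contact. A compilable miniature of that find-or-create flow; the match rule and types are deliberately simplified:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

enum { FIRST_CONTACT = 1, REUSE_CONTACT = 2 }; /* mirrors cln_first_contact */

struct group { int vlan_id; int conns; struct group *next; };

static struct group *groups;
static pthread_mutex_t groups_lock = PTHREAD_MUTEX_INITIALIZER;

static struct group *conn_create(int vlan_id, int *contact)
{
	struct group *g;

	*contact = FIRST_CONTACT;     /* like ini->cln_first_contact above */
	pthread_mutex_lock(&groups_lock);
	for (g = groups; g; g = g->next) {
		if (g->vlan_id == vlan_id && g->conns < 255) {
			g->conns++;   /* link group found: reuse it */
			*contact = REUSE_CONTACT;
			goto out;
		}
	}
	g = calloc(1, sizeof(*g));    /* first contact: new group */
	if (!g)
		goto out;             /* would map to SMC_CLC_DECL_MEM */
	g->vlan_id = vlan_id;
	g->conns = 1;
	g->next = groups;
	groups = g;
out:
	pthread_mutex_unlock(&groups_lock);
	return g;
}

int main(void)
{
	int contact;

	conn_create(100, &contact);
	printf("first: %d\n", contact == FIRST_CONTACT);
	conn_create(100, &contact);
	printf("reuse: %d\n", contact == REUSE_CONTACT);
	return 0;
}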
 
 /* convert the RMB size into the compressed notation - minimum 16K.
index 8806d2afa6edb1178f4ad2904aa1346ee42bcae6..c00ac61dc129f8c39351a5666ab706fa5d013d19 100644 (file)
@@ -229,6 +229,24 @@ struct smc_link_group {
        };
 };
 
+struct smc_clc_msg_local;
+
+struct smc_init_info {
+       u8                      is_smcd;
+       unsigned short          vlan_id;
+       int                     srv_first_contact;
+       int                     cln_first_contact;
+       /* SMC-R */
+       struct smc_clc_msg_local *ib_lcl;
+       struct smc_ib_device    *ib_dev;
+       u8                      ib_gid[SMC_GID_SIZE];
+       u8                      ib_port;
+       u32                     ib_clcqpn;
+       /* SMC-D */
+       u64                     ism_gid;
+       struct smcd_dev         *ism_dev;
+};
+
 /* Find the connection associated with the given alert token in the link group.
  * To use rbtrees we have to implement our own search core.
  * Requires @conns_lock
@@ -281,13 +299,10 @@ void smc_sndbuf_sync_sg_for_cpu(struct smc_connection *conn);
 void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn);
 void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn);
 void smc_rmb_sync_sg_for_device(struct smc_connection *conn);
-int smc_vlan_by_tcpsk(struct socket *clcsock, unsigned short *vlan_id);
+int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini);
 
 void smc_conn_free(struct smc_connection *conn);
-int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact,
-                   struct smc_ib_device *smcibdev, u8 ibport, u32 clcqpn,
-                   struct smc_clc_msg_local *lcl, struct smcd_dev *smcd,
-                   u64 peer_gid);
+int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini);
 void smcd_conn_free(struct smc_connection *conn);
 void smc_lgr_schedule_free_work_fast(struct smc_link_group *lgr);
 void smc_core_exit(void);
index 2fff79db1a59ce3d2908722941dd9355810c65a0..e89e918b88e09acaad980da8dc34e3d921fe69be 100644 (file)
@@ -289,6 +289,11 @@ struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name,
        INIT_LIST_HEAD(&smcd->vlan);
        smcd->event_wq = alloc_ordered_workqueue("ism_evt_wq-%s",
                                                 WQ_MEM_RECLAIM, name);
+       if (!smcd->event_wq) {
+               kfree(smcd->conn);
+               kfree(smcd);
+               return NULL;
+       }
        return smcd;
 }
 EXPORT_SYMBOL_GPL(smcd_alloc_dev);
index 3cdf81cf97a346250ea4d408ecd14caff053afe8..9f5d8f36f2d74af0e4f2fdac5848b41b3a7a2c49 100644 (file)
@@ -26,6 +26,7 @@
 #include "smc_pnet.h"
 #include "smc_ib.h"
 #include "smc_ism.h"
+#include "smc_core.h"
 
 #define SMC_ASCII_BLANK 32
 
@@ -603,7 +604,8 @@ static int smc_pnet_flush(struct sk_buff *skb, struct genl_info *info)
 {
        struct net *net = genl_info_net(info);
 
-       return smc_pnet_remove_by_pnetid(net, NULL);
+       smc_pnet_remove_by_pnetid(net, NULL);
+       return 0;
 }
 
 /* SMC_PNETID generic netlink operation definition */
@@ -755,8 +757,7 @@ static int smc_pnet_find_ndev_pnetid_by_table(struct net_device *ndev,
  * IB device and port
  */
 static void smc_pnet_find_rdma_dev(struct net_device *netdev,
-                                  struct smc_ib_device **smcibdev,
-                                  u8 *ibport, unsigned short vlan_id, u8 gid[])
+                                  struct smc_init_info *ini)
 {
        struct smc_ib_device *ibdev;
 
@@ -776,10 +777,10 @@ static void smc_pnet_find_rdma_dev(struct net_device *netdev,
                        dev_put(ndev);
                        if (netdev == ndev &&
                            smc_ib_port_active(ibdev, i) &&
-                           !smc_ib_determine_gid(ibdev, i, vlan_id, gid,
-                                                 NULL)) {
-                               *smcibdev = ibdev;
-                               *ibport = i;
+                           !smc_ib_determine_gid(ibdev, i, ini->vlan_id,
+                                                 ini->ib_gid, NULL)) {
+                               ini->ib_dev = ibdev;
+                               ini->ib_port = i;
                                break;
                        }
                }
@@ -794,9 +795,7 @@ static void smc_pnet_find_rdma_dev(struct net_device *netdev,
  * If nothing found, try to use handshake device
  */
 static void smc_pnet_find_roce_by_pnetid(struct net_device *ndev,
-                                        struct smc_ib_device **smcibdev,
-                                        u8 *ibport, unsigned short vlan_id,
-                                        u8 gid[])
+                                        struct smc_init_info *ini)
 {
        u8 ndev_pnetid[SMC_MAX_PNETID_LEN];
        struct smc_ib_device *ibdev;
@@ -806,7 +805,7 @@ static void smc_pnet_find_roce_by_pnetid(struct net_device *ndev,
        if (smc_pnetid_by_dev_port(ndev->dev.parent, ndev->dev_port,
                                   ndev_pnetid) &&
            smc_pnet_find_ndev_pnetid_by_table(ndev, ndev_pnetid)) {
-               smc_pnet_find_rdma_dev(ndev, smcibdev, ibport, vlan_id, gid);
+               smc_pnet_find_rdma_dev(ndev, ini);
                return; /* pnetid could not be determined */
        }
 
@@ -817,10 +816,10 @@ static void smc_pnet_find_roce_by_pnetid(struct net_device *ndev,
                                continue;
                        if (smc_pnet_match(ibdev->pnetid[i - 1], ndev_pnetid) &&
                            smc_ib_port_active(ibdev, i) &&
-                           !smc_ib_determine_gid(ibdev, i, vlan_id, gid,
-                                                 NULL))  {
-                               *smcibdev = ibdev;
-                               *ibport = i;
+                           !smc_ib_determine_gid(ibdev, i, ini->vlan_id,
+                                                 ini->ib_gid, NULL)) {
+                               ini->ib_dev = ibdev;
+                               ini->ib_port = i;
                                goto out;
                        }
                }
@@ -830,7 +829,7 @@ static void smc_pnet_find_roce_by_pnetid(struct net_device *ndev,
 }
 
 static void smc_pnet_find_ism_by_pnetid(struct net_device *ndev,
-                                       struct smcd_dev **smcismdev)
+                                       struct smc_init_info *ini)
 {
        u8 ndev_pnetid[SMC_MAX_PNETID_LEN];
        struct smcd_dev *ismdev;
@@ -844,7 +843,7 @@ static void smc_pnet_find_ism_by_pnetid(struct net_device *ndev,
        spin_lock(&smcd_dev_list.lock);
        list_for_each_entry(ismdev, &smcd_dev_list.list, list) {
                if (smc_pnet_match(ismdev->pnetid, ndev_pnetid)) {
-                       *smcismdev = ismdev;
+                       ini->ism_dev = ismdev;
                        break;
                }
        }
@@ -855,21 +854,18 @@ static void smc_pnet_find_ism_by_pnetid(struct net_device *ndev,
  * determine ib_device and port belonging to used internal TCP socket
  * ethernet interface.
  */
-void smc_pnet_find_roce_resource(struct sock *sk,
-                                struct smc_ib_device **smcibdev, u8 *ibport,
-                                unsigned short vlan_id, u8 gid[])
+void smc_pnet_find_roce_resource(struct sock *sk, struct smc_init_info *ini)
 {
        struct dst_entry *dst = sk_dst_get(sk);
 
-       *smcibdev = NULL;
-       *ibport = 0;
-
+       ini->ib_dev = NULL;
+       ini->ib_port = 0;
        if (!dst)
                goto out;
        if (!dst->dev)
                goto out_rel;
 
-       smc_pnet_find_roce_by_pnetid(dst->dev, smcibdev, ibport, vlan_id, gid);
+       smc_pnet_find_roce_by_pnetid(dst->dev, ini);
 
 out_rel:
        dst_release(dst);
@@ -877,17 +873,17 @@ void smc_pnet_find_roce_resource(struct sock *sk,
        return;
 }
 
-void smc_pnet_find_ism_resource(struct sock *sk, struct smcd_dev **smcismdev)
+void smc_pnet_find_ism_resource(struct sock *sk, struct smc_init_info *ini)
 {
        struct dst_entry *dst = sk_dst_get(sk);
 
-       *smcismdev = NULL;
+       ini->ism_dev = NULL;
        if (!dst)
                goto out;
        if (!dst->dev)
                goto out_rel;
 
-       smc_pnet_find_ism_by_pnetid(dst->dev, smcismdev);
+       smc_pnet_find_ism_by_pnetid(dst->dev, ini);
 
 out_rel:
        dst_release(dst);
index 5eac42fb45d06c98eaf7180c3af6bf21021a5a17..4564e4d69c2e563d3c519434f01c4fb203781242 100644 (file)
@@ -18,6 +18,7 @@
 
 struct smc_ib_device;
 struct smcd_dev;
+struct smc_init_info;
 
 /**
  * struct smc_pnettable - SMC PNET table anchor
@@ -43,9 +44,7 @@ int smc_pnet_init(void) __init;
 int smc_pnet_net_init(struct net *net);
 void smc_pnet_exit(void);
 void smc_pnet_net_exit(struct net *net);
-void smc_pnet_find_roce_resource(struct sock *sk,
-                                struct smc_ib_device **smcibdev, u8 *ibport,
-                                unsigned short vlan_id, u8 gid[]);
-void smc_pnet_find_ism_resource(struct sock *sk, struct smcd_dev **smcismdev);
+void smc_pnet_find_roce_resource(struct sock *sk, struct smc_init_info *ini);
+void smc_pnet_find_ism_resource(struct sock *sk, struct smc_init_info *ini);
 
 #endif
index 860dcfb95ee472fed5d74e6015af2acce178c0a7..0ba363624339c9d21b2bc06418750cd952949d54 100644 (file)
@@ -140,13 +140,11 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
                        /* We are going to append to the frags_list of head.
                         * Need to unshare the frag_list.
                         */
-                       if (skb_has_frag_list(head)) {
-                               err = skb_unclone(head, GFP_ATOMIC);
-                               if (err) {
-                                       STRP_STATS_INCR(strp->stats.mem_fail);
-                                       desc->error = err;
-                                       return 0;
-                               }
+                       err = skb_unclone(head, GFP_ATOMIC);
+                       if (err) {
+                               STRP_STATS_INCR(strp->stats.mem_fail);
+                               desc->error = err;
+                               return 0;
                        }
 
                        if (unlikely(skb_shinfo(head)->frag_list)) {
@@ -299,7 +297,7 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
                        break;
                }
 
-               /* Positive extra indicates ore bytes than needed for the
+               /* Positive extra indicates more bytes than needed for the
                 * message
                 */
 
index 187d10443a1584e196245afc9837add06daa1c86..1d0395ef62c95b9285bdaca47f96f48a3d5093bf 100644 (file)
@@ -1540,7 +1540,6 @@ call_start(struct rpc_task *task)
        clnt->cl_stats->rpccnt++;
        task->tk_action = call_reserve;
        rpc_task_set_transport(task, clnt);
-       call_reserve(task);
 }
 
 /*
@@ -1554,9 +1553,6 @@ call_reserve(struct rpc_task *task)
        task->tk_status  = 0;
        task->tk_action  = call_reserveresult;
        xprt_reserve(task);
-       if (rpc_task_need_resched(task))
-               return;
-        call_reserveresult(task);
 }
 
 static void call_retry_reserve(struct rpc_task *task);
@@ -1579,7 +1575,6 @@ call_reserveresult(struct rpc_task *task)
        if (status >= 0) {
                if (task->tk_rqstp) {
                        task->tk_action = call_refresh;
-                       call_refresh(task);
                        return;
                }
 
@@ -1605,7 +1600,6 @@ call_reserveresult(struct rpc_task *task)
                /* fall through */
        case -EAGAIN:   /* woken up; retry */
                task->tk_action = call_retry_reserve;
-               call_retry_reserve(task);
                return;
        case -EIO:      /* probably a shutdown */
                break;
@@ -1628,9 +1622,6 @@ call_retry_reserve(struct rpc_task *task)
        task->tk_status  = 0;
        task->tk_action  = call_reserveresult;
        xprt_retry_reserve(task);
-       if (rpc_task_need_resched(task))
-               return;
-       call_reserveresult(task);
 }
 
 /*
@@ -1645,9 +1636,6 @@ call_refresh(struct rpc_task *task)
        task->tk_status = 0;
        task->tk_client->cl_stats->rpcauthrefresh++;
        rpcauth_refreshcred(task);
-       if (rpc_task_need_resched(task))
-               return;
-       call_refreshresult(task);
 }
 
 /*
@@ -1666,7 +1654,6 @@ call_refreshresult(struct rpc_task *task)
        case 0:
                if (rpcauth_uptodatecred(task)) {
                        task->tk_action = call_allocate;
-                       call_allocate(task);
                        return;
                }
                /* Use rate-limiting and a max number of retries if refresh
@@ -1685,7 +1672,6 @@ call_refreshresult(struct rpc_task *task)
                task->tk_cred_retry--;
                dprintk("RPC: %5u %s: retry refresh creds\n",
                                task->tk_pid, __func__);
-               call_refresh(task);
                return;
        }
        dprintk("RPC: %5u %s: refresh creds failed with error %d\n",
@@ -1711,10 +1697,8 @@ call_allocate(struct rpc_task *task)
        task->tk_status = 0;
        task->tk_action = call_encode;
 
-       if (req->rq_buffer) {
-               call_encode(task);
+       if (req->rq_buffer)
                return;
-       }
 
        if (proc->p_proc != 0) {
                BUG_ON(proc->p_arglen == 0);
@@ -1740,12 +1724,8 @@ call_allocate(struct rpc_task *task)
 
        status = xprt->ops->buf_alloc(task);
        xprt_inject_disconnect(xprt);
-       if (status == 0) {
-               if (rpc_task_need_resched(task))
-                       return;
-               call_encode(task);
+       if (status == 0)
                return;
-       }
        if (status != -ENOMEM) {
                rpc_exit(task, status);
                return;
@@ -1828,8 +1808,12 @@ call_encode(struct rpc_task *task)
                xprt_request_enqueue_receive(task);
        xprt_request_enqueue_transmit(task);
 out:
-       task->tk_action = call_bind;
-       call_bind(task);
+       task->tk_action = call_transmit;
+       /* Check that the connection is OK */
+       if (!xprt_bound(task->tk_xprt))
+               task->tk_action = call_bind;
+       else if (!xprt_connected(task->tk_xprt))
+               task->tk_action = call_connect;
 }
 
 /*
@@ -1847,7 +1831,6 @@ rpc_task_handle_transmitted(struct rpc_task *task)
 {
        xprt_end_transmit(task);
        task->tk_action = call_transmit_status;
-       call_transmit_status(task);
 }
 
 /*
@@ -1865,7 +1848,6 @@ call_bind(struct rpc_task *task)
 
        if (xprt_bound(xprt)) {
                task->tk_action = call_connect;
-               call_connect(task);
                return;
        }
 
@@ -1896,7 +1878,6 @@ call_bind_status(struct rpc_task *task)
                dprint_status(task);
                task->tk_status = 0;
                task->tk_action = call_connect;
-               call_connect(task);
                return;
        }
 
@@ -1981,7 +1962,6 @@ call_connect(struct rpc_task *task)
 
        if (xprt_connected(xprt)) {
                task->tk_action = call_transmit;
-               call_transmit(task);
                return;
        }
 
@@ -2051,7 +2031,6 @@ call_connect_status(struct rpc_task *task)
        case 0:
                clnt->cl_stats->netreconn++;
                task->tk_action = call_transmit;
-               call_transmit(task);
                return;
        }
        rpc_exit(task, status);
@@ -2087,9 +2066,6 @@ call_transmit(struct rpc_task *task)
                xprt_transmit(task);
        }
        xprt_end_transmit(task);
-       if (rpc_task_need_resched(task))
-               return;
-       call_transmit_status(task);
 }
 
 /*
@@ -2107,9 +2083,6 @@ call_transmit_status(struct rpc_task *task)
        if (rpc_task_transmitted(task)) {
                if (task->tk_status == 0)
                        xprt_request_wait_receive(task);
-               if (rpc_task_need_resched(task))
-                       return;
-               call_status(task);
                return;
        }
 
@@ -2170,7 +2143,6 @@ call_bc_encode(struct rpc_task *task)
 {
        xprt_request_enqueue_transmit(task);
        task->tk_action = call_bc_transmit;
-       call_bc_transmit(task);
 }
 
 /*
@@ -2261,7 +2233,6 @@ call_status(struct rpc_task *task)
        status = task->tk_status;
        if (status >= 0) {
                task->tk_action = call_decode;
-               call_decode(task);
                return;
        }
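
These clnt.c hunks undo the "call the next state directly" shortcut: each call_* step now only assigns task->tk_action and returns, and the scheduler loop invokes whatever action is current. A toy version of that dispatch style; the loop below stands in for __rpc_execute(), which the patch relies on but which is not shown here:

#include <stdio.h>

struct task;
typedef void (*action_t)(struct task *);
struct task { action_t tk_action; int status; };

static void call_done(struct task *t)     { t->tk_action = NULL; }
static void call_transmit(struct task *t) { t->tk_action = call_done; }
static void call_bind(struct task *t)     { t->tk_action = call_transmit; }

int main(void)
{
	struct task t = { .tk_action = call_bind };

	while (t.tk_action)          /* the scheduler's execute loop */
		t.tk_action(&t);
	printf("done\n");
	return 0;
}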
 
index 89a63391d4d442f6d390556aa8cf0b5a2a41357a..30cfc0efe6990aa5f693b56c26c257bd968f43e5 100644 (file)
@@ -90,7 +90,7 @@ static void rpcrdma_xprt_drain(struct rpcrdma_xprt *r_xprt)
        /* Flush Receives, then wait for deferred Reply work
         * to complete.
         */
-       ib_drain_qp(ia->ri_id->qp);
+       ib_drain_rq(ia->ri_id->qp);
        drain_workqueue(buf->rb_completion_wq);
 
        /* Deferred Reply processing might have scheduled
index 76e14dc08bb9fc23dbb2358d4559b97966f35818..6c997d4a62189347650c43b9d2891a6883b0be6f 100644 (file)
@@ -769,6 +769,9 @@ void tipc_mcast_filter_msg(struct net *net, struct sk_buff_head *defq,
        u32 node, port;
 
        skb = skb_peek(inputq);
+       if (!skb)
+               return;
+
        hdr = buf_msg(skb);
 
        if (likely(!msg_is_syn(hdr) && skb_queue_empty(defq)))
index 52d23b3ffaf5c450747276376978d58c724042b3..6053489c8063633ef7f206977c5ca27985edf5ff 100644 (file)
@@ -151,6 +151,7 @@ struct tipc_link {
        /* Failover/synch */
        u16 drop_point;
        struct sk_buff *failover_reasm_skb;
+       struct sk_buff_head failover_deferdq;
 
        /* Max packet negotiation */
        u16 mtu;
@@ -209,6 +210,7 @@ enum {
 };
 
 #define TIPC_BC_RETR_LIM msecs_to_jiffies(10)   /* [ms] */
+#define TIPC_UC_RETR_TIME (jiffies + msecs_to_jiffies(1))
 
 /*
  * Interval between NACKs when packets arrive out of order
@@ -246,6 +248,10 @@ static int tipc_link_build_nack_msg(struct tipc_link *l,
 static void tipc_link_build_bc_init_msg(struct tipc_link *l,
                                        struct sk_buff_head *xmitq);
 static bool tipc_link_release_pkts(struct tipc_link *l, u16 to);
+static u16 tipc_build_gap_ack_blks(struct tipc_link *l, void *data);
+static void tipc_link_advance_transmq(struct tipc_link *l, u16 acked, u16 gap,
+                                     struct tipc_gap_ack_blks *ga,
+                                     struct sk_buff_head *xmitq);
 
 /*
  *  Simple non-static link routines (i.e. referenced outside this file)
@@ -493,6 +499,7 @@ bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
        __skb_queue_head_init(&l->transmq);
        __skb_queue_head_init(&l->backlogq);
        __skb_queue_head_init(&l->deferdq);
+       __skb_queue_head_init(&l->failover_deferdq);
        skb_queue_head_init(&l->wakeupq);
        skb_queue_head_init(l->inputq);
        return true;
@@ -869,6 +876,8 @@ void tipc_link_reset(struct tipc_link *l)
        __skb_queue_head_init(&list);
 
        l->in_session = false;
+       /* Force re-synch of peer session number before establishing */
+       l->peer_session--;
        l->session++;
        l->mtu = l->advertised_mtu;
 
@@ -883,6 +892,7 @@ void tipc_link_reset(struct tipc_link *l)
        __skb_queue_purge(&l->transmq);
        __skb_queue_purge(&l->deferdq);
        __skb_queue_purge(&l->backlogq);
+       __skb_queue_purge(&l->failover_deferdq);
        l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
        l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
        l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
@@ -1154,34 +1164,14 @@ static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
  * Consumes buffer
  */
 static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
-                          struct sk_buff_head *inputq)
+                          struct sk_buff_head *inputq,
+                          struct sk_buff **reasm_skb)
 {
        struct tipc_msg *hdr = buf_msg(skb);
-       struct sk_buff **reasm_skb = &l->reasm_buf;
        struct sk_buff *iskb;
        struct sk_buff_head tmpq;
        int usr = msg_user(hdr);
-       int rc = 0;
        int pos = 0;
-       int ipos = 0;
-
-       if (unlikely(usr == TUNNEL_PROTOCOL)) {
-               if (msg_type(hdr) == SYNCH_MSG) {
-                       __skb_queue_purge(&l->deferdq);
-                       goto drop;
-               }
-               if (!tipc_msg_extract(skb, &iskb, &ipos))
-                       return rc;
-               kfree_skb(skb);
-               skb = iskb;
-               hdr = buf_msg(skb);
-               if (less(msg_seqno(hdr), l->drop_point))
-                       goto drop;
-               if (tipc_data_input(l, skb, inputq))
-                       return rc;
-               usr = msg_user(hdr);
-               reasm_skb = &l->failover_reasm_skb;
-       }
 
        if (usr == MSG_BUNDLER) {
                skb_queue_head_init(&tmpq);
@@ -1206,11 +1196,66 @@ static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
                tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
                tipc_bcast_unlock(l->net);
        }
-drop:
+
        kfree_skb(skb);
        return 0;
 }
 
+/* tipc_link_tnl_rcv() - receive TUNNEL_PROTOCOL message, drop or process the
+ *                      inner message along with the ones in the old link's
+ *                      deferdq
+ * @l: tunnel link
+ * @skb: TUNNEL_PROTOCOL message
+ * @inputq: queue to put messages ready for delivery
+ */
+static int tipc_link_tnl_rcv(struct tipc_link *l, struct sk_buff *skb,
+                            struct sk_buff_head *inputq)
+{
+       struct sk_buff **reasm_skb = &l->failover_reasm_skb;
+       struct sk_buff_head *fdefq = &l->failover_deferdq;
+       struct tipc_msg *hdr = buf_msg(skb);
+       struct sk_buff *iskb;
+       int ipos = 0;
+       int rc = 0;
+       u16 seqno;
+
+       /* SYNCH_MSG */
+       if (msg_type(hdr) == SYNCH_MSG)
+               goto drop;
+
+       /* FAILOVER_MSG */
+       if (!tipc_msg_extract(skb, &iskb, &ipos)) {
+               pr_warn_ratelimited("Cannot extract FAILOVER_MSG, defq: %d\n",
+                                   skb_queue_len(fdefq));
+               return rc;
+       }
+
+       do {
+               seqno = buf_seqno(iskb);
+
+               if (unlikely(less(seqno, l->drop_point))) {
+                       kfree_skb(iskb);
+                       continue;
+               }
+
+               if (unlikely(seqno != l->drop_point)) {
+                       __tipc_skb_queue_sorted(fdefq, seqno, iskb);
+                       continue;
+               }
+
+               l->drop_point++;
+
+               if (!tipc_data_input(l, iskb, inputq))
+                       rc |= tipc_link_input(l, iskb, inputq, reasm_skb);
+               if (unlikely(rc))
+                       break;
+       } while ((iskb = __tipc_skb_dequeue(fdefq, l->drop_point)));
+
+drop:
+       kfree_skb(skb);
+       return rc;
+}
+
 static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
 {
        bool released = false;
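
tipc_link_tnl_rcv() above delivers the tunneled inner packets strictly in seqno order: anything below drop_point is a duplicate and freed, anything above it is parked in failover_deferdq, and each in-order delivery advances drop_point and pulls any now-eligible parked skbs. A worked trace, assuming drop_point starts at 5 and the inner packets arrive as 7, 4, 5, 6:

    /* seqno 7: 7 != drop_point (5)  -> parked, fdefq = {7}
     * seqno 4: less(4, 5)           -> duplicate, freed
     * seqno 5: 5 == drop_point      -> delivered, drop_point = 6;
     *          fdefq head is 7, not yet eligible
     * seqno 6: delivered, drop_point = 7; now 7 is dequeued from
     *          fdefq and delivered too, drop_point = 8, fdefq = {}
     */
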
@@ -1226,6 +1271,106 @@ static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
        return released;
 }
 
+/* tipc_build_gap_ack_blks - build Gap ACK blocks
+ * @l: tipc link whose deferdq may hold packets received with sequence gaps
+ * @data: buffer in which to store the built Gap ACK blocks
+ *
+ * Returns the actual length of the built record
+ */
+static u16 tipc_build_gap_ack_blks(struct tipc_link *l, void *data)
+{
+       struct sk_buff *skb = skb_peek(&l->deferdq);
+       struct tipc_gap_ack_blks *ga = data;
+       u16 len, expect, seqno = 0;
+       u8 n = 0;
+
+       if (!skb)
+               goto exit;
+
+       expect = buf_seqno(skb);
+       skb_queue_walk(&l->deferdq, skb) {
+               seqno = buf_seqno(skb);
+               if (unlikely(more(seqno, expect))) {
+                       ga->gacks[n].ack = htons(expect - 1);
+                       ga->gacks[n].gap = htons(seqno - expect);
+                       if (++n >= MAX_GAP_ACK_BLKS) {
+                               pr_info_ratelimited("Too few Gap ACK blocks!\n");
+                               goto exit;
+                       }
+               } else if (unlikely(less(seqno, expect))) {
+                       pr_warn("Unexpected skb in deferdq!\n");
+                       continue;
+               }
+               expect = seqno + 1;
+       }
+
+       /* last block */
+       ga->gacks[n].ack = htons(seqno);
+       ga->gacks[n].gap = 0;
+       n++;
+
+exit:
+       len = tipc_gap_ack_blks_sz(n);
+       ga->len = htons(len);
+       ga->gack_cnt = n;
+       return len;
+}
+
+/* tipc_link_advance_transmq - advance the TIPC link transmq by releasing
+ *                            acked packets and retransmitting those that
+ *                            fall within reported gaps
+ * @l: tipc link whose transmq is to be advanced
+ * @acked: seqno of the last packet acked by the peer with no gap before it
+ * @gap: number of gap packets following @acked
+ * @ga: pointer to the Gap ACK blocks received from the peer, if any
+ * @xmitq: queue for accumulating the retransmitted packets, if any
+ */
+static void tipc_link_advance_transmq(struct tipc_link *l, u16 acked, u16 gap,
+                                     struct tipc_gap_ack_blks *ga,
+                                     struct sk_buff_head *xmitq)
+{
+       struct sk_buff *skb, *_skb, *tmp;
+       struct tipc_msg *hdr;
+       u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
+       u16 ack = l->rcv_nxt - 1;
+       u16 seqno;
+       u16 n = 0;
+
+       skb_queue_walk_safe(&l->transmq, skb, tmp) {
+               seqno = buf_seqno(skb);
+
+next_gap_ack:
+               if (less_eq(seqno, acked)) {
+                       /* release skb */
+                       __skb_unlink(skb, &l->transmq);
+                       kfree_skb(skb);
+               } else if (less_eq(seqno, acked + gap)) {
+                       /* retransmit skb */
+                       if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
+                               continue;
+                       TIPC_SKB_CB(skb)->nxt_retr = TIPC_UC_RETR_TIME;
+
+                       _skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
+                       if (!_skb)
+                               continue;
+                       hdr = buf_msg(_skb);
+                       msg_set_ack(hdr, ack);
+                       msg_set_bcast_ack(hdr, bc_ack);
+                       _skb->priority = TC_PRIO_CONTROL;
+                       __skb_queue_tail(xmitq, _skb);
+                       l->stats.retransmitted++;
+               } else {
+                       /* retry with Gap ACK blocks if any */
+                       if (!ga || n >= ga->gack_cnt)
+                               break;
+                       acked = ntohs(ga->gacks[n].ack);
+                       gap = ntohs(ga->gacks[n].gap);
+                       n++;
+                       goto next_gap_ack;
+               }
+       }
+}
+
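
A worked trace of the walk, using the Gap ACK example documented in the tipc_gap_ack comment later in this diff (blocks <4, 5>, <11, 1>, <15, 4>, <20, 0>) and assuming the STATE_MSG header carried ack = 4 and gap = 5 with seqnos 1..20 sitting in transmq:

    /* 1..4   released                (less_eq(seqno, acked))
     * 5..9   copied to xmitq         (within acked + gap, subject to
     *                                 the nxt_retr rate limit)
     * 10     consumes blocks until <11, 1>  -> released
     * 11     released; 12 -> xmitq
     * 13..15 released via <15, 4>;   16..19 -> xmitq
     * 20     released via <20, 0>;   walk ends
     */
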
 /* tipc_link_build_state_msg: prepare link state message for transmission
  *
  * Note that sending of broadcast ack is coordinated among nodes, to reduce
@@ -1280,6 +1425,7 @@ static int tipc_link_build_nack_msg(struct tipc_link *l,
                                    struct sk_buff_head *xmitq)
 {
        u32 def_cnt = ++l->stats.deferred_recv;
+       u32 defq_len = skb_queue_len(&l->deferdq);
        int match1, match2;
 
        if (link_is_bc_rcvlink(l)) {
@@ -1290,7 +1436,7 @@ static int tipc_link_build_nack_msg(struct tipc_link *l,
                return 0;
        }
 
-       if ((skb_queue_len(&l->deferdq) == 1) || !(def_cnt % TIPC_NACK_INTV))
+       if (defq_len >= 3 && !((defq_len - 3) % 16))
                tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
        return 0;
 }
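
The new condition changes the NACK cadence: a NACK-bearing STATE_MSG now goes out when the deferred queue first reaches 3 packets and then after every further 16 deferrals, instead of on the very first deferral plus every TIPC_NACK_INTV. Checking the arithmetic:

    /* defq_len >= 3 && !((defq_len - 3) % 16)
     *   defq_len =  3  ->  0 % 16 == 0  -> send NACK
     *   defq_len = 10  ->  7 % 16 != 0  -> skip
     *   defq_len = 19  -> 16 % 16 == 0  -> send NACK
     */
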
@@ -1304,29 +1450,29 @@ int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
                  struct sk_buff_head *xmitq)
 {
        struct sk_buff_head *defq = &l->deferdq;
-       struct tipc_msg *hdr;
+       struct tipc_msg *hdr = buf_msg(skb);
        u16 seqno, rcv_nxt, win_lim;
        int rc = 0;
 
+       /* Verify and update link state */
+       if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
+               return tipc_link_proto_rcv(l, skb, xmitq);
+
+       /* Don't send probe at next timeout expiration */
+       l->silent_intv_cnt = 0;
+
        do {
                hdr = buf_msg(skb);
                seqno = msg_seqno(hdr);
                rcv_nxt = l->rcv_nxt;
                win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;
 
-               /* Verify and update link state */
-               if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
-                       return tipc_link_proto_rcv(l, skb, xmitq);
-
                if (unlikely(!link_is_up(l))) {
                        if (l->state == LINK_ESTABLISHING)
                                rc = TIPC_LINK_UP_EVT;
                        goto drop;
                }
 
-               /* Don't send probe at next timeout expiration */
-               l->silent_intv_cnt = 0;
-
                /* Drop if outside receive window */
                if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
                        l->stats.duplicates++;
@@ -1351,13 +1497,16 @@ int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
                /* Deliver packet */
                l->rcv_nxt++;
                l->stats.recv_pkts++;
-               if (!tipc_data_input(l, skb, l->inputq))
-                       rc |= tipc_link_input(l, skb, l->inputq);
+
+               if (unlikely(msg_user(hdr) == TUNNEL_PROTOCOL))
+                       rc |= tipc_link_tnl_rcv(l, skb, l->inputq);
+               else if (!tipc_data_input(l, skb, l->inputq))
+                       rc |= tipc_link_input(l, skb, l->inputq, &l->reasm_buf);
                if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
                        rc |= tipc_link_build_state_msg(l, xmitq);
                if (unlikely(rc & ~TIPC_LINK_SND_STATE))
                        break;
-       } while ((skb = __skb_dequeue(defq)));
+       } while ((skb = __tipc_skb_dequeue(defq, l->rcv_nxt)));
 
        return rc;
 drop:
@@ -1378,6 +1527,7 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
        struct tipc_mon_state *mstate = &l->mon_state;
        int dlen = 0;
        void *data;
+       u16 glen = 0;
 
        /* Don't send protocol message during reset or link failover */
        if (tipc_link_is_blocked(l))
@@ -1390,8 +1540,8 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
                rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;
 
        skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE,
-                             tipc_max_domain_size, l->addr,
-                             tipc_own_addr(l->net), 0, 0, 0);
+                             tipc_max_domain_size + MAX_GAP_ACK_BLKS_SZ,
+                             l->addr, tipc_own_addr(l->net), 0, 0, 0);
        if (!skb)
                return;
 
@@ -1418,9 +1568,11 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
                msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl));
                msg_set_probe(hdr, probe);
                msg_set_is_keepalive(hdr, probe || probe_reply);
-               tipc_mon_prep(l->net, data, &dlen, mstate, l->bearer_id);
-               msg_set_size(hdr, INT_H_SIZE + dlen);
-               skb_trim(skb, INT_H_SIZE + dlen);
+               if (l->peer_caps & TIPC_GAP_ACK_BLOCK)
+                       glen = tipc_build_gap_ack_blks(l, data);
+               tipc_mon_prep(l->net, data + glen, &dlen, mstate, l->bearer_id);
+               msg_set_size(hdr, INT_H_SIZE + glen + dlen);
+               skb_trim(skb, INT_H_SIZE + glen + dlen);
                l->stats.sent_states++;
                l->rcv_unacked = 0;
        } else {
@@ -1479,6 +1631,7 @@ void tipc_link_create_dummy_tnl_msg(struct tipc_link *l,
 void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
                           int mtyp, struct sk_buff_head *xmitq)
 {
+       struct sk_buff_head *fdefq = &tnl->failover_deferdq;
        struct sk_buff *skb, *tnlskb;
        struct tipc_msg *hdr, tnlhdr;
        struct sk_buff_head *queue = &l->transmq;
@@ -1506,7 +1659,11 @@ void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
        /* Initialize reusable tunnel packet header */
        tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
                      mtyp, INT_H_SIZE, l->addr);
-       pktcnt = skb_queue_len(&l->transmq) + skb_queue_len(&l->backlogq);
+       if (mtyp == SYNCH_MSG)
+               pktcnt = l->snd_nxt - buf_seqno(skb_peek(&l->transmq));
+       else
+               pktcnt = skb_queue_len(&l->transmq);
+       pktcnt += skb_queue_len(&l->backlogq);
        msg_set_msgcnt(&tnlhdr, pktcnt);
        msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
 tnl:
@@ -1537,6 +1694,14 @@ void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
                tnl->drop_point = l->rcv_nxt;
                tnl->failover_reasm_skb = l->reasm_buf;
                l->reasm_buf = NULL;
+
+               /* Failover the link's deferdq */
+               if (unlikely(!skb_queue_empty(fdefq))) {
+                       pr_warn("Link failover deferdq not empty: %d!\n",
+                               skb_queue_len(fdefq));
+                       __skb_queue_purge(fdefq);
+               }
+               skb_queue_splice_init(&l->deferdq, fdefq);
        }
 }
 
@@ -1590,6 +1755,7 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
                               struct sk_buff_head *xmitq)
 {
        struct tipc_msg *hdr = buf_msg(skb);
+       struct tipc_gap_ack_blks *ga = NULL;
        u16 rcvgap = 0;
        u16 ack = msg_ack(hdr);
        u16 gap = msg_seq_gap(hdr);
@@ -1600,6 +1766,7 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
        u16 dlen = msg_data_sz(hdr);
        int mtyp = msg_type(hdr);
        bool reply = msg_probe(hdr);
+       u16 glen = 0;
        void *data;
        char *if_name;
        int rc = 0;
@@ -1697,7 +1864,17 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
                                rc = TIPC_LINK_UP_EVT;
                        break;
                }
-               tipc_mon_rcv(l->net, data, dlen, l->addr,
+
+               /* Receive Gap ACK blocks from peer if any */
+               if (l->peer_caps & TIPC_GAP_ACK_BLOCK) {
+                       ga = (struct tipc_gap_ack_blks *)data;
+                       glen = ntohs(ga->len);
+                       /* sanity check: if failed, ignore Gap ACK blocks */
+                       if (glen != tipc_gap_ack_blks_sz(ga->gack_cnt))
+                               ga = NULL;
+               }
+
+               tipc_mon_rcv(l->net, data + glen, dlen - glen, l->addr,
                             &l->mon_state, l->bearer_id);
 
                /* Send NACK if peer has sent pkts we haven't received yet */
@@ -1706,13 +1883,12 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
                if (rcvgap || reply)
                        tipc_link_build_proto_msg(l, STATE_MSG, 0, reply,
                                                  rcvgap, 0, 0, xmitq);
-               tipc_link_release_pkts(l, ack);
+
+               tipc_link_advance_transmq(l, ack, gap, ga, xmitq);
 
                /* If NACK, retransmit will now start at right position */
-               if (gap) {
-                       rc = tipc_link_retrans(l, l, ack + 1, ack + gap, xmitq);
+               if (gap)
                        l->stats.recv_nacks++;
-               }
 
                tipc_link_advance_backlog(l, xmitq);
                if (unlikely(!skb_queue_empty(&l->wakeupq)))
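
Sender and receiver now agree on the STATE_MSG body layout when TIPC_GAP_ACK_BLOCK is negotiated: the Gap ACK record comes first, the monitor data after it. That is why the sender sizes the message as INT_H_SIZE + glen + dlen, while the receiver, whose msg_data_sz() covers both parts, reads the monitor data at data + glen with length dlen - glen. Schematically:

    /* | INT_H_SIZE header | Gap ACK blocks (glen) | monitor data |
     *
     * glen == 0 when the capability is absent, reducing to the
     * old layout.
     */
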
index 528ba9241acc23aeaceed68c74527e1827f5e40d..8de02ad6e352cc17b0c825530ba613e586eebc09 100644 (file)
@@ -117,6 +117,37 @@ struct tipc_msg {
        __be32 hdr[15];
 };
 
+/* struct tipc_gap_ack - TIPC Gap ACK block
+ * @ack: seqno of the last consecutive packet in link deferdq
+ * @gap: number of gap packets since the last ack
+ *
+ * E.g:
+ *       link deferdq: 1 2 3 4      10 11      13 14 15       20
+ * --> Gap ACK blocks:      <4, 5>,   <11, 1>,      <15, 4>, <20, 0>
+ */
+struct tipc_gap_ack {
+       __be16 ack;
+       __be16 gap;
+};
+
+/* struct tipc_gap_ack_blks
+ * @len: actual length of the record
+ * @gack_cnt: number of Gap ACK blocks in the record
+ * @gacks: array of Gap ACK blocks
+ */
+struct tipc_gap_ack_blks {
+       __be16 len;
+       u8 gack_cnt;
+       u8 reserved;
+       struct tipc_gap_ack gacks[];
+};
+
+#define tipc_gap_ack_blks_sz(n) (sizeof(struct tipc_gap_ack_blks) + \
+                                sizeof(struct tipc_gap_ack) * (n))
+
+#define MAX_GAP_ACK_BLKS       32
+#define MAX_GAP_ACK_BLKS_SZ    tipc_gap_ack_blks_sz(MAX_GAP_ACK_BLKS)
+
 static inline struct tipc_msg *buf_msg(struct sk_buff *skb)
 {
        return (struct tipc_msg *)skb->data;
@@ -1120,4 +1151,25 @@ static inline void tipc_skb_queue_splice_tail_init(struct sk_buff_head *list,
        tipc_skb_queue_splice_tail(&tmp, head);
 }
 
+/* __tipc_skb_dequeue() - dequeue the head skb according to expected seqno
+ * @list: list to be dequeued from
+ * @seqno: seqno of the expected msg
+ *
+ * Returns the skb dequeued from the list if its seqno is less than or equal
+ * to the expected one; otherwise the skb is left on the list
+ *
+ * Note: must be used with appropriate locks held only
+ */
+static inline struct sk_buff *__tipc_skb_dequeue(struct sk_buff_head *list,
+                                                u16 seqno)
+{
+       struct sk_buff *skb = skb_peek(list);
+
+       if (skb && less_eq(buf_seqno(skb), seqno)) {
+               __skb_unlink(skb, list);
+               return skb;
+       }
+       return NULL;
+}
+
 #endif
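
Given the field sizes above (two __be16 per block; a __be16 length plus two u8 in the record header) and assuming no padding, which these layouts guarantee, the record size arithmetic works out to:

    /* sizeof(struct tipc_gap_ack)      == 4          (2 x __be16)
     * sizeof(struct tipc_gap_ack_blks) == 4          (__be16 + u8 + u8)
     * tipc_gap_ack_blks_sz(n)          == 4 + 4 * n
     * MAX_GAP_ACK_BLKS_SZ              == 4 + 4 * 32 == 132 bytes
     */
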
index bff241f0352501aba8605622df16f2c85044c09b..89993afe0fbd38713dd3d0499cc79e6c3e159b4d 100644 (file)
@@ -909,7 +909,8 @@ static int tipc_nl_service_list(struct net *net, struct tipc_nl_msg *msg,
        for (; i < TIPC_NAMETBL_SIZE; i++) {
                head = &tn->nametbl->services[i];
 
-               if (*last_type) {
+               if (*last_type ||
+                   (!i && *last_key && (*last_lower == *last_key))) {
                        service = tipc_service_find(net, *last_type);
                        if (!service)
                                return -EPIPE;
index 4ad3586da8f028c0fb8244382b343a1c7635f6cb..340a6e7c43a7d39c596de4f1b0045e4200edc2a3 100644 (file)
@@ -267,8 +267,14 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
        if (msg->rep_type)
                tipc_tlv_init(msg->rep, msg->rep_type);
 
-       if (cmd->header)
-               (*cmd->header)(msg);
+       if (cmd->header) {
+               err = (*cmd->header)(msg);
+               if (err) {
+                       kfree_skb(msg->rep);
+                       msg->rep = NULL;
+                       return err;
+               }
+       }
 
        arg = nlmsg_new(0, GFP_KERNEL);
        if (!arg) {
@@ -397,7 +403,12 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
        if (!bearer)
                return -EMSGSIZE;
 
-       len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
+       len = TLV_GET_DATA_LEN(msg->req);
+       len -= offsetof(struct tipc_bearer_config, name);
+       if (len <= 0)
+               return -EINVAL;
+
+       len = min_t(int, len, TIPC_MAX_BEARER_NAME);
        if (!string_is_valid(b->name, len))
                return -EINVAL;
 
@@ -766,7 +777,12 @@ static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd,
 
        lc = (struct tipc_link_config *)TLV_DATA(msg->req);
 
-       len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
+       len = TLV_GET_DATA_LEN(msg->req);
+       len -= offsetof(struct tipc_link_config, name);
+       if (len <= 0)
+               return -EINVAL;
+
+       len = min_t(int, len, TIPC_MAX_LINK_NAME);
        if (!string_is_valid(lc->name, len))
                return -EINVAL;
 
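Both hunks add the same validation: the legacy TLV must be large enough to actually contain name bytes after the fixed fields before string_is_valid() walks them. Sketching the tipc_link_config case, where the uapi struct puts a __be32 value ahead of the name:

    /* TLV payload:  | __be32 value | char name[...] |
     *                              ^ offsetof(struct tipc_link_config, name)
     *
     * len = TLV_GET_DATA_LEN(msg->req) - offsetof(..., name);
     * len <= 0   -> no room for even one name byte  -> -EINVAL
     * otherwise  -> clamp to TIPC_MAX_LINK_NAME and require a
     *               terminating NUL within it (string_is_valid)
     */
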
index 3469b5d4ed32c8481cf97ea8153e241d28cb917c..7478e2d4ec0278d3539f1725889445f26e291395 100644 (file)
@@ -375,14 +375,15 @@ static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
                if (n->capabilities == capabilities)
                        goto exit;
                /* Same node may come back with new capabilities */
-               write_lock_bh(&n->lock);
+               tipc_node_write_lock(n);
                n->capabilities = capabilities;
                for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
                        l = n->links[bearer_id].link;
                        if (l)
                                tipc_link_update_caps(l, capabilities);
                }
-               write_unlock_bh(&n->lock);
+               tipc_node_write_unlock_fast(n);
+
                /* Calculate cluster capabilities */
                tn->capabilities = TIPC_NODE_CAPABILITIES;
                list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
index 2404225c5d58ba84ead791f1a1343712cd845566..c0bf49ea3de46ce91ba1757c8efe912d077da92d 100644 (file)
@@ -52,7 +52,8 @@ enum {
        TIPC_BCAST_RCAST      = (1 << 4),
        TIPC_NODE_ID128       = (1 << 5),
        TIPC_LINK_PROTO_SEQNO = (1 << 6),
-       TIPC_MCAST_RBCTL      = (1 << 7)
+       TIPC_MCAST_RBCTL      = (1 << 7),
+       TIPC_GAP_ACK_BLOCK    = (1 << 8)
 };
 
 #define TIPC_NODE_CAPABILITIES (TIPC_SYN_BIT           |  \
@@ -62,7 +63,8 @@ enum {
                                TIPC_BLOCK_FLOWCTL     |   \
                                TIPC_NODE_ID128        |   \
                                TIPC_LINK_PROTO_SEQNO  |   \
-                               TIPC_MCAST_RBCTL)
+                               TIPC_MCAST_RBCTL       |   \
+                               TIPC_GAP_ACK_BLOCK)
 #define INVALID_BEARER_ID -1
 
 void tipc_node_stop(struct net *net);
index 3481e4906bd6a4a3e1f27ec5d49106090c7ec7f1..9df82a573aa7768f583999e740022ce00295bbd4 100644 (file)
@@ -38,6 +38,8 @@
 
 #include <linux/sysctl.h>
 
+static int zero;
+static int one = 1;
 static struct ctl_table_header *tipc_ctl_hdr;
 
 static struct ctl_table tipc_table[] = {
@@ -46,14 +48,16 @@ static struct ctl_table tipc_table[] = {
                .data           = &sysctl_tipc_rmem,
                .maxlen         = sizeof(sysctl_tipc_rmem),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &one,
        },
        {
                .procname       = "named_timeout",
                .data           = &sysctl_tipc_named_timeout,
                .maxlen         = sizeof(sysctl_tipc_named_timeout),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &zero,
        },
        {
                .procname       = "sk_filter",
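
proc_dointvec_minmax rejects writes that fall outside [extra1, extra2], so pointing extra1 at one (for the receive-buffer sizes) and at zero (for the timeout) forbids nonsensical values; a NULL extra2 leaves the upper end unbounded. An illustrative entry of the same shape, not taken from this patch:

    static int one = 1;
    static int example_value = 1;

    static struct ctl_table example_table[] = {
            {
                    .procname     = "example_min_one",
                    .data         = &example_value,
                    .maxlen       = sizeof(int),
                    .mode         = 0644,
                    .proc_handler = proc_dointvec_minmax,
                    .extra1       = &one,    /* writes < 1 get -EINVAL */
            },
            {}
    };
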
index 135a7ee9db034149252f8df3a56f7834ff573eab..9f3bdbc1e59348cf049c0cca9959ee5f413e41cb 100644 (file)
@@ -52,8 +52,11 @@ static DEFINE_SPINLOCK(tls_device_lock);
 
 static void tls_device_free_ctx(struct tls_context *ctx)
 {
-       if (ctx->tx_conf == TLS_HW)
+       if (ctx->tx_conf == TLS_HW) {
                kfree(tls_offload_ctx_tx(ctx));
+               kfree(ctx->tx.rec_seq);
+               kfree(ctx->tx.iv);
+       }
 
        if (ctx->rx_conf == TLS_HW)
                kfree(tls_offload_ctx_rx(ctx));
@@ -216,6 +219,13 @@ void tls_device_sk_destruct(struct sock *sk)
 }
 EXPORT_SYMBOL(tls_device_sk_destruct);
 
+void tls_device_free_resources_tx(struct sock *sk)
+{
+       struct tls_context *tls_ctx = tls_get_ctx(sk);
+
+       tls_free_partial_record(sk, tls_ctx);
+}
+
 static void tls_append_frag(struct tls_record_info *record,
                            struct page_frag *pfrag,
                            int size)
index 0e24edab25356acb79c8407116e0fd28b92759c9..7e546b8ec0004304d263e6b289317c6a2d70a4a4 100644 (file)
@@ -208,6 +208,26 @@ int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
        return tls_push_sg(sk, ctx, sg, offset, flags);
 }
 
+bool tls_free_partial_record(struct sock *sk, struct tls_context *ctx)
+{
+       struct scatterlist *sg;
+
+       sg = ctx->partially_sent_record;
+       if (!sg)
+               return false;
+
+       while (1) {
+               put_page(sg_page(sg));
+               sk_mem_uncharge(sk, sg->length);
+
+               if (sg_is_last(sg))
+                       break;
+               sg++;
+       }
+       ctx->partially_sent_record = NULL;
+       return true;
+}
+
 static void tls_write_space(struct sock *sk)
 {
        struct tls_context *ctx = tls_get_ctx(sk);
@@ -267,6 +287,10 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
                kfree(ctx->tx.rec_seq);
                kfree(ctx->tx.iv);
                tls_sw_free_resources_tx(sk);
+#ifdef CONFIG_TLS_DEVICE
+       } else if (ctx->tx_conf == TLS_HW) {
+               tls_device_free_resources_tx(sk);
+#endif
        }
 
        if (ctx->rx_conf == TLS_SW) {
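
The three TLS hunks belong together: a partially sent record used to be freed only on the TLS_SW close path, so a TLS_HW (offload) socket leaked it, along with tx.rec_seq and tx.iv. The common logic is factored into tls_free_partial_record() and the close path now dispatches per TX configuration; roughly:

    /* tls_sk_proto_close(), sketched:
     *   tx_conf == TLS_SW -> kfree rec_seq/iv;
     *                        tls_sw_free_resources_tx()
     *                          -> tls_free_partial_record()
     *   tx_conf == TLS_HW -> tls_device_free_resources_tx()
     *                          -> tls_free_partial_record();
     *                        rec_seq/iv freed in tls_device_free_ctx()
     */
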
index 4f821edeeae6bad9d3e0c82aeb056e7f508a1522..f780b473827bd407dda1870192e1c8bf19eb4bf2 100644 (file)
@@ -1497,6 +1497,8 @@ static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
 
                                return err;
                        }
+               } else {
+                       *zc = false;
                }
 
                rxm->full_len -= padding_length(ctx, tls_ctx, skb);
@@ -2063,20 +2065,7 @@ void tls_sw_free_resources_tx(struct sock *sk)
        /* Free up un-sent records in tx_list. First, free
         * the partially sent record if any at head of tx_list.
         */
-       if (tls_ctx->partially_sent_record) {
-               struct scatterlist *sg = tls_ctx->partially_sent_record;
-
-               while (1) {
-                       put_page(sg_page(sg));
-                       sk_mem_uncharge(sk, sg->length);
-
-                       if (sg_is_last(sg))
-                               break;
-                       sg++;
-               }
-
-               tls_ctx->partially_sent_record = NULL;
-
+       if (tls_free_partial_record(sk, tls_ctx)) {
                rec = list_first_entry(&ctx->tx_list,
                                       struct tls_rec, list);
                list_del(&rec->list);
index ddb838a1b74c0bf04fb8dda43fa2bb471925ad5c..e68d7454f2e3bcf9b278c56c945ad711e68dfdd9 100644 (file)
@@ -2040,8 +2040,8 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
        struct unix_sock *u = unix_sk(sk);
        struct sk_buff *skb, *last;
        long timeo;
+       int skip;
        int err;
-       int peeked, skip;
 
        err = -EOPNOTSUPP;
        if (flags&MSG_OOB)
@@ -2053,8 +2053,8 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
                mutex_lock(&u->iolock);
 
                skip = sk_peek_offset(sk, flags);
-               skb = __skb_try_recv_datagram(sk, flags, NULL, &peeked, &skip,
-                                             &err, &last);
+               skb = __skb_try_recv_datagram(sk, flags, NULL, &skip, &err,
+                                             &last);
                if (skb)
                        break;
 
index 33408ba1d7eede0b02cf0e707f28203d4c2681fc..e7ee18ab6cb7143d2ff826644ffb9c980d25e6bd 100644 (file)
@@ -13614,7 +13614,8 @@ static const struct genl_ops nl80211_ops[] = {
                .doit = nl80211_associate,
                .flags = GENL_UNS_ADMIN_PERM,
                .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
-                                 NL80211_FLAG_NEED_RTNL,
+                                 NL80211_FLAG_NEED_RTNL |
+                                 NL80211_FLAG_CLEAR_SKB,
        },
        {
                .cmd = NL80211_CMD_DEAUTHENTICATE,
@@ -13659,14 +13660,16 @@ static const struct genl_ops nl80211_ops[] = {
                .doit = nl80211_connect,
                .flags = GENL_UNS_ADMIN_PERM,
                .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
-                                 NL80211_FLAG_NEED_RTNL,
+                                 NL80211_FLAG_NEED_RTNL |
+                                 NL80211_FLAG_CLEAR_SKB,
        },
        {
                .cmd = NL80211_CMD_UPDATE_CONNECT_PARAMS,
                .doit = nl80211_update_connect_params,
                .flags = GENL_ADMIN_PERM,
                .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
-                                 NL80211_FLAG_NEED_RTNL,
+                                 NL80211_FLAG_NEED_RTNL |
+                                 NL80211_FLAG_CLEAR_SKB,
        },
        {
                .cmd = NL80211_CMD_DISCONNECT,
@@ -13691,7 +13694,8 @@ static const struct genl_ops nl80211_ops[] = {
                .doit = nl80211_setdel_pmksa,
                .flags = GENL_UNS_ADMIN_PERM,
                .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
-                                 NL80211_FLAG_NEED_RTNL,
+                                 NL80211_FLAG_NEED_RTNL |
+                                 NL80211_FLAG_CLEAR_SKB,
        },
        {
                .cmd = NL80211_CMD_DEL_PMKSA,
@@ -13999,7 +14003,8 @@ static const struct genl_ops nl80211_ops[] = {
                .dumpit = nl80211_vendor_cmd_dump,
                .flags = GENL_UNS_ADMIN_PERM,
                .internal_flags = NL80211_FLAG_NEED_WIPHY |
-                                 NL80211_FLAG_NEED_RTNL,
+                                 NL80211_FLAG_NEED_RTNL |
+                                 NL80211_FLAG_CLEAR_SKB,
        },
        {
                .cmd = NL80211_CMD_SET_QOS_MAP,
@@ -14047,7 +14052,8 @@ static const struct genl_ops nl80211_ops[] = {
                .cmd = NL80211_CMD_SET_PMK,
                .doit = nl80211_set_pmk,
                .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
-                                 NL80211_FLAG_NEED_RTNL,
+                                 NL80211_FLAG_NEED_RTNL |
+                                 NL80211_FLAG_CLEAR_SKB,
        },
        {
                .cmd = NL80211_CMD_DEL_PMK,
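
NL80211_FLAG_CLEAR_SKB is being added to every op whose request may carry key material (PMKs, PSKs inside connect/associate parameters, vendor commands). The intent is that the generic handler scrubs the request buffer after processing rather than merely freeing it; a hedged sketch of that cleanup, since the real hook lives elsewhere in nl80211:

    /* post-doit, sketched (not the literal implementation) */
    if (ops->internal_flags & NL80211_FLAG_CLEAR_SKB)
            memzero_explicit(skb->data, skb->len);  /* no keys in freed memory */
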
index 2f1bf91eb2265a26bcebeeb3589735e77a3a9daa..0ba778f371cb25fe6b610dc6e112beed2db55e9b 100644 (file)
@@ -1309,6 +1309,16 @@ reg_intersect_dfs_region(const enum nl80211_dfs_regions dfs_region1,
        return dfs_region1;
 }
 
+static void reg_wmm_rules_intersect(const struct ieee80211_wmm_ac *wmm_ac1,
+                                   const struct ieee80211_wmm_ac *wmm_ac2,
+                                   struct ieee80211_wmm_ac *intersect)
+{
+       intersect->cw_min = max_t(u16, wmm_ac1->cw_min, wmm_ac2->cw_min);
+       intersect->cw_max = max_t(u16, wmm_ac1->cw_max, wmm_ac2->cw_max);
+       intersect->cot = min_t(u16, wmm_ac1->cot, wmm_ac2->cot);
+       intersect->aifsn = max_t(u8, wmm_ac1->aifsn, wmm_ac2->aifsn);
+}
+
 /*
  * Helper for regdom_intersect(), this does the real
  * mathematical intersection fun
@@ -1323,6 +1333,8 @@ static int reg_rules_intersect(const struct ieee80211_regdomain *rd1,
        struct ieee80211_freq_range *freq_range;
        const struct ieee80211_power_rule *power_rule1, *power_rule2;
        struct ieee80211_power_rule *power_rule;
+       const struct ieee80211_wmm_rule *wmm_rule1, *wmm_rule2;
+       struct ieee80211_wmm_rule *wmm_rule;
        u32 freq_diff, max_bandwidth1, max_bandwidth2;
 
        freq_range1 = &rule1->freq_range;
@@ -1333,6 +1345,10 @@ static int reg_rules_intersect(const struct ieee80211_regdomain *rd1,
        power_rule2 = &rule2->power_rule;
        power_rule = &intersected_rule->power_rule;
 
+       wmm_rule1 = &rule1->wmm_rule;
+       wmm_rule2 = &rule2->wmm_rule;
+       wmm_rule = &intersected_rule->wmm_rule;
+
        freq_range->start_freq_khz = max(freq_range1->start_freq_khz,
                                         freq_range2->start_freq_khz);
        freq_range->end_freq_khz = min(freq_range1->end_freq_khz,
@@ -1376,6 +1392,29 @@ static int reg_rules_intersect(const struct ieee80211_regdomain *rd1,
        intersected_rule->dfs_cac_ms = max(rule1->dfs_cac_ms,
                                           rule2->dfs_cac_ms);
 
+       if (rule1->has_wmm && rule2->has_wmm) {
+               u8 ac;
+
+               for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+                       reg_wmm_rules_intersect(&wmm_rule1->client[ac],
+                                               &wmm_rule2->client[ac],
+                                               &wmm_rule->client[ac]);
+                       reg_wmm_rules_intersect(&wmm_rule1->ap[ac],
+                                               &wmm_rule2->ap[ac],
+                                               &wmm_rule->ap[ac]);
+               }
+
+               intersected_rule->has_wmm = true;
+       } else if (rule1->has_wmm) {
+               *wmm_rule = *wmm_rule1;
+               intersected_rule->has_wmm = true;
+       } else if (rule2->has_wmm) {
+               *wmm_rule = *wmm_rule2;
+               intersected_rule->has_wmm = true;
+       } else {
+               intersected_rule->has_wmm = false;
+       }
+
        if (!is_valid_reg_rule(intersected_rule))
                return -EINVAL;
 
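reg_wmm_rules_intersect() keeps whichever per-AC parameter is more conservative: the larger contention-window bounds and AIFSN, the smaller channel occupancy time. A worked example with made-up values:

    /* ac1: cw_min = 3, cw_max = 7,  cot = 4000, aifsn = 2
     * ac2: cw_min = 7, cw_max = 15, cot = 2000, aifsn = 3
     * ->   cw_min = 7, cw_max = 15, cot = 2000, aifsn = 3
     */
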
index 287518c6caa40204525993d8b2477e269324f378..04d888628f29dcca952d38d48b785d5e2c56dfef 100644 (file)
@@ -190,10 +190,9 @@ static size_t cfg80211_gen_new_ie(const u8 *ie, size_t ielen,
        /* copy subelement as we need to change its content to
         * mark an ie after it is processed.
         */
-       sub_copy = kmalloc(subie_len, gfp);
+       sub_copy = kmemdup(subelement, subie_len, gfp);
        if (!sub_copy)
                return 0;
-       memcpy(sub_copy, subelement, subie_len);
 
        pos = &new_ie[0];
 
index e4b8db5e81ec710db0ee8e779046e04263441e08..75899b62bdc9ed2116a1420a6035c904307f4838 100644 (file)
@@ -1220,9 +1220,11 @@ static u32 cfg80211_calculate_bitrate_he(struct rate_info *rate)
        else if (rate->bw == RATE_INFO_BW_HE_RU &&
                 rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_26)
                result = rates_26[rate->he_gi];
-       else if (WARN(1, "invalid HE MCS: bw:%d, ru:%d\n",
-                     rate->bw, rate->he_ru_alloc))
+       else {
+               WARN(1, "invalid HE MCS: bw:%d, ru:%d\n",
+                    rate->bw, rate->he_ru_alloc);
                return 0;
+       }
 
        /* now scale to the appropriate MCS */
        tmp = result;
index 5cd7c1d1a5d5637247cf51ba86fd4543d7190a46..7409722727ca16fcffa9c7bc5fe03f6a749cc4bf 100644 (file)
@@ -13,4 +13,5 @@
 #define asm_volatile_goto(x...) asm volatile("invalid use of asm_volatile_goto")
 #endif
 
+#define volatile(x...) volatile("")
 #endif
index f06063af9fcb9a9fe4ca2eb90c68669b2ad6ca11..bb315ce1b8660cf86fd70a2b3d917d0742064706 100644 (file)
@@ -28,6 +28,11 @@ static void print_ksym(__u64 addr)
        if (!addr)
                return;
        sym = ksym_search(addr);
+       if (!sym) {
+               printf("ksym not found. Is kallsyms loaded?\n");
+               return;
+       }
+
        if (PRINT_RAW_ADDR)
                printf("%s/%llx;", sym->name, addr);
        else
index 216c7ecbbbe9ef201ff3e7225e0cbf71a4f44718..23b90a45c80251ca3765f75873d1a85d423cf1ab 100644 (file)
@@ -109,6 +109,11 @@ static void print_ip_map(int fd)
        for (i = 0; i < max; i++) {
                if (counts[i].ip > PAGE_OFFSET) {
                        sym = ksym_search(counts[i].ip);
+                       if (!sym) {
+                               printf("ksym not found. Is kallsyms loaded?\n");
+                               continue;
+                       }
+
                        printf("0x%-17llx %-32s %u\n", counts[i].ip, sym->name,
                               counts[i].count);
                } else {
index 8d3e9cfa190978e56d00bde53a22d3f72f9e9b7e..2556af2d9b3e8e2725bfc2c062f99d588b7f727a 100644 (file)
@@ -37,8 +37,13 @@ int main(int ac, char **argv)
                        bpf_map_lookup_elem(map_fd[0], &next_key, &value);
                        assert(next_key == value);
                        sym = ksym_search(value);
-                       printf(" %s", sym->name);
                        key = next_key;
+                       if (!sym) {
+                               printf("ksym not found. Is kallsyms loaded?\n");
+                               continue;
+                       }
+
+                       printf(" %s", sym->name);
                }
                if (key)
                        printf("\n");
index d08046ab81f043505e0ea42a2e8c85661ae68f76..d4178f60e075f099efe340c4d22b05ce4df013ef 100644 (file)
@@ -34,6 +34,11 @@ static void print_ksym(__u64 addr)
        if (!addr)
                return;
        sym = ksym_search(addr);
+       if (!sym) {
+               printf("ksym not found. Is kallsyms loaded?\n");
+               return;
+       }
+
        printf("%s;", sym->name);
        if (!strcmp(sym->name, "sys_read"))
                sys_read_seen = true;
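
All four sample fixes guard the same call: ksym_search() returns NULL when kallsyms has not been loaded or the address is unknown, and each caller used to dereference the result unconditionally. Were more call sites to appear, a small wrapper could centralize the check; a hypothetical helper, not part of the samples' shared code:

    static const char *ksym_name(__u64 addr)
    {
            struct ksym *sym = ksym_search(addr);

            return sym ? sym->name : "<unknown; is kallsyms loaded?>";
    }
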
index 2554a15ecf2b8796c41e593d97c8b55ee97a4620..76ca30cc4791912fde4d7f36e4a90549e653cfbb 100644 (file)
@@ -199,11 +199,8 @@ sub_cmd_record_mcount = perl $(srctree)/scripts/recordmcount.pl "$(ARCH)" \
        "$(if $(part-of-module),1,0)" "$(@)";
 recordmcount_source := $(srctree)/scripts/recordmcount.pl
 endif # BUILD_C_RECORDMCOUNT
-cmd_record_mcount =                                            \
-       if [ "$(findstring $(CC_FLAGS_FTRACE),$(_c_flags))" =   \
-            "$(CC_FLAGS_FTRACE)" ]; then                       \
-               $(sub_cmd_record_mcount)                        \
-       fi
+cmd_record_mcount = $(if $(findstring $(strip $(CC_FLAGS_FTRACE)),$(_c_flags)),        \
+       $(sub_cmd_record_mcount))
 endif # CC_USING_RECORD_MCOUNT
 endif # CONFIG_FTRACE_MCOUNT_RECORD
 
index 5b756278df13e8aa7f5daf92914858afbfd24482..a09333fd7cef81053bf204dd03a4db07bbb833ef 100755 (executable)
@@ -5977,7 +5977,7 @@ sub process {
                                while ($fmt =~ /(\%[\*\d\.]*p(\w))/g) {
                                        $specifier = $1;
                                        $extension = $2;
-                                       if ($extension !~ /[SsBKRraEhMmIiUDdgVCbGNOx]/) {
+                                       if ($extension !~ /[SsBKRraEhMmIiUDdgVCbGNOxt]/) {
                                                $bad_specifier = $specifier;
                                                last;
                                        }
diff --git a/scripts/coccinelle/api/stream_open.cocci b/scripts/coccinelle/api/stream_open.cocci
new file mode 100644 (file)
index 0000000..350145d
--- /dev/null
@@ -0,0 +1,363 @@
+// SPDX-License-Identifier: GPL-2.0
+// Author: Kirill Smelkov (kirr@nexedi.com)
+//
+// Search for stream-like files that are using nonseekable_open and convert
+// them to stream_open. A stream-like file is a file that does not use ppos in
+// its read and write. The rationale for the conversion is to avoid a
+// deadlock between read and write.
+
+virtual report
+virtual patch
+virtual explain  // explain decisions in the patch (SPFLAGS="-D explain")
+
+// stream-like reader & writer - ones that do not depend on f_pos.
+@ stream_reader @
+identifier readstream, ppos;
+identifier f, buf, len;
+type loff_t;
+@@
+  ssize_t readstream(struct file *f, char *buf, size_t len, loff_t *ppos)
+  {
+    ... when != ppos
+  }
+
+@ stream_writer @
+identifier writestream, ppos;
+identifier f, buf, len;
+type loff_t;
+@@
+  ssize_t writestream(struct file *f, const char *buf, size_t len, loff_t *ppos)
+  {
+    ... when != ppos
+  }
+
+
+// a function that blocks
+@ blocks @
+identifier block_f;
+identifier wait_event =~ "^wait_event_.*";
+@@
+  block_f(...) {
+    ... when exists
+    wait_event(...)
+    ... when exists
+  }
+
+// stream_reader that can block inside.
+//
+// XXX wait_* may be reached indirectly rather than called from the function itself (e.g. func -> f -> g -> wait())
+// XXX currently reader_blocks supports only direct and 1-level indirect cases.
+@ reader_blocks_direct @
+identifier stream_reader.readstream;
+identifier wait_event =~ "^wait_event_.*";
+@@
+  readstream(...)
+  {
+    ... when exists
+    wait_event(...)
+    ... when exists
+  }
+
+@ reader_blocks_1 @
+identifier stream_reader.readstream;
+identifier blocks.block_f;
+@@
+  readstream(...)
+  {
+    ... when exists
+    block_f(...)
+    ... when exists
+  }
+
+@ reader_blocks depends on reader_blocks_direct || reader_blocks_1 @
+identifier stream_reader.readstream;
+@@
+  readstream(...) {
+    ...
+  }
+
+
+// file_operations + whether they have _any_ .read, .write, .llseek ... at all.
+//
+// XXX add support for file_operations xxx[N] = ...    (sound/core/pcm_native.c)
+@ fops0 @
+identifier fops;
+@@
+  struct file_operations fops = {
+    ...
+  };
+
+@ has_read @
+identifier fops0.fops;
+identifier read_f;
+@@
+  struct file_operations fops = {
+    .read = read_f,
+  };
+
+@ has_read_iter @
+identifier fops0.fops;
+identifier read_iter_f;
+@@
+  struct file_operations fops = {
+    .read_iter = read_iter_f,
+  };
+
+@ has_write @
+identifier fops0.fops;
+identifier write_f;
+@@
+  struct file_operations fops = {
+    .write = write_f,
+  };
+
+@ has_write_iter @
+identifier fops0.fops;
+identifier write_iter_f;
+@@
+  struct file_operations fops = {
+    .write_iter = write_iter_f,
+  };
+
+@ has_llseek @
+identifier fops0.fops;
+identifier llseek_f;
+@@
+  struct file_operations fops = {
+    .llseek = llseek_f,
+  };
+
+@ has_no_llseek @
+identifier fops0.fops;
+@@
+  struct file_operations fops = {
+    .llseek = no_llseek,
+  };
+
+@ has_mmap @
+identifier fops0.fops;
+identifier mmap_f;
+@@
+  struct file_operations fops = {
+    .mmap = mmap_f,
+  };
+
+@ has_copy_file_range @
+identifier fops0.fops;
+identifier copy_file_range_f;
+@@
+  struct file_operations fops = {
+    .copy_file_range = copy_file_range_f,
+  };
+
+@ has_remap_file_range @
+identifier fops0.fops;
+identifier remap_file_range_f;
+@@
+  struct file_operations fops = {
+    .remap_file_range = remap_file_range_f,
+  };
+
+@ has_splice_read @
+identifier fops0.fops;
+identifier splice_read_f;
+@@
+  struct file_operations fops = {
+    .splice_read = splice_read_f,
+  };
+
+@ has_splice_write @
+identifier fops0.fops;
+identifier splice_write_f;
+@@
+  struct file_operations fops = {
+    .splice_write = splice_write_f,
+  };
+
+
+// file_operations that is candidate for stream_open conversion - it does not
+// use mmap and other methods that assume @offset access to file.
+//
+// XXX for simplicity require no .{read/write}_iter and no .splice_{read/write} for now.
+// XXX maybe_stream.fops cannot be used in other rules - it gives "bad rule maybe_stream or bad variable fops".
+@ maybe_stream depends on (!has_llseek || has_no_llseek) && !has_mmap && !has_copy_file_range && !has_remap_file_range && !has_read_iter && !has_write_iter && !has_splice_read && !has_splice_write @
+identifier fops0.fops;
+@@
+  struct file_operations fops = {
+  };
+
+
+// ---- conversions ----
+
+// XXX .open = nonseekable_open -> .open = stream_open
+// XXX .open = func -> openfunc -> nonseekable_open
+
+// read & write
+//
+// if both are used in the same file_operations together with an opener -
+// under that conditions we can use stream_open instead of nonseekable_open.
+@ fops_rw depends on maybe_stream @
+identifier fops0.fops, openfunc;
+identifier stream_reader.readstream;
+identifier stream_writer.writestream;
+@@
+  struct file_operations fops = {
+      .open  = openfunc,
+      .read  = readstream,
+      .write = writestream,
+  };
+
+@ report_rw depends on report @
+identifier fops_rw.openfunc;
+position p1;
+@@
+  openfunc(...) {
+    <...
+     nonseekable_open@p1
+    ...>
+  }
+
+@ script:python depends on report && reader_blocks @
+fops << fops0.fops;
+p << report_rw.p1;
+@@
+coccilib.report.print_report(p[0],
+  "ERROR: %s: .read() can deadlock .write(); change nonseekable_open -> stream_open to fix." % (fops,))
+
+@ script:python depends on report && !reader_blocks @
+fops << fops0.fops;
+p << report_rw.p1;
+@@
+coccilib.report.print_report(p[0],
+  "WARNING: %s: .read() and .write() have stream semantic; safe to change nonseekable_open -> stream_open." % (fops,))
+
+
+@ explain_rw_deadlocked depends on explain && reader_blocks @
+identifier fops_rw.openfunc;
+@@
+  openfunc(...) {
+    <...
+-    nonseekable_open
++    nonseekable_open /* read & write (was deadlock) */
+    ...>
+  }
+
+
+@ explain_rw_nodeadlock depends on explain && !reader_blocks @
+identifier fops_rw.openfunc;
+@@
+  openfunc(...) {
+    <...
+-    nonseekable_open
++    nonseekable_open /* read & write (no direct deadlock) */
+    ...>
+  }
+
+@ patch_rw depends on patch @
+identifier fops_rw.openfunc;
+@@
+  openfunc(...) {
+    <...
+-   nonseekable_open
++   stream_open
+    ...>
+  }
+
+
+// read, but not write
+@ fops_r depends on maybe_stream && !has_write @
+identifier fops0.fops, openfunc;
+identifier stream_reader.readstream;
+@@
+  struct file_operations fops = {
+      .open  = openfunc,
+      .read  = readstream,
+  };
+
+@ report_r depends on report @
+identifier fops_r.openfunc;
+position p1;
+@@
+  openfunc(...) {
+    <...
+    nonseekable_open@p1
+    ...>
+  }
+
+@ script:python depends on report @
+fops << fops0.fops;
+p << report_r.p1;
+@@
+coccilib.report.print_report(p[0],
+  "WARNING: %s: .read() has stream semantic; safe to change nonseekable_open -> stream_open." % (fops,))
+
+@ explain_r depends on explain @
+identifier fops_r.openfunc;
+@@
+  openfunc(...) {
+    <...
+-   nonseekable_open
++   nonseekable_open /* read only */
+    ...>
+  }
+
+@ patch_r depends on patch @
+identifier fops_r.openfunc;
+@@
+  openfunc(...) {
+    <...
+-   nonseekable_open
++   stream_open
+    ...>
+  }
+
+
+// write, but not read
+@ fops_w depends on maybe_stream && !has_read @
+identifier fops0.fops, openfunc;
+identifier stream_writer.writestream;
+@@
+  struct file_operations fops = {
+      .open  = openfunc,
+      .write = writestream,
+  };
+
+@ report_w depends on report @
+identifier fops_w.openfunc;
+position p1;
+@@
+  openfunc(...) {
+    <...
+    nonseekable_open@p1
+    ...>
+  }
+
+@ script:python depends on report @
+fops << fops0.fops;
+p << report_w.p1;
+@@
+coccilib.report.print_report(p[0],
+  "WARNING: %s: .write() has stream semantic; safe to change nonseekable_open -> stream_open." % (fops,))
+
+@ explain_w depends on explain @
+identifier fops_w.openfunc;
+@@
+  openfunc(...) {
+    <...
+-   nonseekable_open
++   nonseekable_open /* write only */
+    ...>
+  }
+
+@ patch_w depends on patch @
+identifier fops_w.openfunc;
+@@
+  openfunc(...) {
+    <...
+-   nonseekable_open
++   stream_open
+    ...>
+  }
+
+
+// no read, no write - don't change anything
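
Concretely, the conversion this script reports and patches looks like the following on a driver whose read/write ignore *ppos and whose file_operations carry none of the offset-based methods; all names here are illustrative:

    static int mydev_open(struct inode *inode, struct file *filp)
    {
            return stream_open(inode, filp);  /* was: nonseekable_open() */
    }

    static const struct file_operations mydev_fops = {
            .owner  = THIS_MODULE,
            .open   = mydev_open,
            .read   = mydev_read,   /* never touches *ppos */
            .write  = mydev_write,  /* never touches *ppos */
            .llseek = no_llseek,
    };
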
index 7395697e7f19a5f524d883b7079197434c2983ae..c9f071b0a0ab70b647bec3633571059934ece1bc 100644 (file)
@@ -32,6 +32,7 @@ if (id == NULL || ...) { ... return ...; }
 (    id
 |    (T2)dev_get_drvdata(&id->dev)
 |    (T3)platform_get_drvdata(id)
+|    &id->dev
 );
 | return@p2 ...;
 )
index 481cf301ccfc3abf2b68c8dcc8b59612ed2e9841..08470362199c7389009982ec41fed3b9860b89cc 100644 (file)
@@ -1,4 +1,4 @@
-/// Use ARRAY_SIZE instead of dividing sizeof array with sizeof an element
+/// Correct the size argument to alloc functions
 ///
 //# This makes an effort to find cases where the argument to sizeof is wrong
 //# in memory allocation functions by checking the type of the allocated memory
index 611945611bf8352d4831a51c411c2d3d5d7afc59..1dcfb288ee63630e7e73be6fe28f1fd1a3bc5857 100644 (file)
@@ -113,7 +113,8 @@ int dialog_inputbox(const char *title, const char *prompt, int height, int width
                        case KEY_DOWN:
                                break;
                        case KEY_BACKSPACE:
-                       case 127:
+                       case 8:   /* ^H */
+                       case 127: /* ^? */
                                if (pos) {
                                        wattrset(dialog, dlg.inputbox.atr);
                                        if (input_x == 0) {
index a4670f4e825a8c779cf4894587b6e7e31f556b02..ac92c0ded6c5c627e974679ef967d4bc37b25a53 100644 (file)
@@ -1048,7 +1048,7 @@ static int do_match(int key, struct match_state *state, int *ans)
                state->match_direction = FIND_NEXT_MATCH_UP;
                *ans = get_mext_match(state->pattern,
                                state->match_direction);
-       } else if (key == KEY_BACKSPACE || key == 127) {
+       } else if (key == KEY_BACKSPACE || key == 8 || key == 127) {
                state->pattern[strlen(state->pattern)-1] = '\0';
                adj_match_dir(&state->match_direction);
        } else
index 7be620a1fcdb8191639aaeaca7b5c6ae421d9769..77f525a8617c27788cc30f9a65c41041050806ed 100644 (file)
@@ -439,7 +439,8 @@ int dialog_inputbox(WINDOW *main_window,
                case KEY_F(F_EXIT):
                case KEY_F(F_BACK):
                        break;
-               case 127:
+               case 8:   /* ^H */
+               case 127: /* ^? */
                case KEY_BACKSPACE:
                        if (cursor_position > 0) {
                                memmove(&result[cursor_position-1],
index dc0e8c5a140239c6a3bb13ccfbca5ab522328a28..dd2b31ccca6a40f5c8a8d10bba81d76ea1425d15 100755 (executable)
@@ -35,7 +35,7 @@ set -e
 info()
 {
        if [ "${quiet}" != "silent_" ]; then
-               printf "  %-7s %s\n" ${1} ${2}
+               printf "  %-7s %s\n" "${1}" "${2}"
        fi
 }
 
@@ -91,6 +91,20 @@ vmlinux_link()
        fi
 }
 
+# generate .BTF typeinfo from DWARF debuginfo
+gen_btf()
+{
+       local pahole_ver;
+
+       pahole_ver=$(${PAHOLE} --version | sed -E 's/v([0-9]+)\.([0-9]+)/\1\2/')
+       if [ "${pahole_ver}" -lt "113" ]; then
+               info "BTF" "${1}: pahole version $(${PAHOLE} --version) is too old, need at least v1.13"
+               exit 0
+       fi
+
+       info "BTF" ${1}
+       LLVM_OBJCOPY=${OBJCOPY} ${PAHOLE} -J ${1}
+}
 
 # Create ${2} .o file with all symbols from the ${1} object file
 kallsyms()
@@ -248,6 +262,10 @@ fi
 info LD vmlinux
 vmlinux_link "${kallsymso}" vmlinux
 
+if [ -n "${CONFIG_DEBUG_INFO_BTF}" ]; then
+       gen_btf vmlinux
+fi
+
 if [ -n "${CONFIG_BUILDTIME_EXTABLE_SORT}" ]; then
        info SORTEX vmlinux
        sortextable vmlinux
index 0b0d1080b1c5ef4903a3b87d8fbfbc11b165e739..f277e116e0ebf64e350c636443b2f7c663b599ea 100644 (file)
@@ -639,7 +639,7 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
                               info->sechdrs[sym->st_shndx].sh_offset -
                               (info->hdr->e_type != ET_REL ?
                                info->sechdrs[sym->st_shndx].sh_addr : 0);
-                       crc = *crcp;
+                       crc = TO_NATIVE(*crcp);
                }
                sym_update_crc(symname + strlen("__crc_"), mod, crc,
                                export);
index 1d6463fb1450c03b8739b102b48b64e05aaa533e..353cfef71d4e9b89f0a71e46748b50dc5ce00c09 100644 (file)
@@ -239,8 +239,46 @@ source "security/safesetid/Kconfig"
 
 source "security/integrity/Kconfig"
 
+choice
+       prompt "First legacy 'major LSM' to be initialized"
+       default DEFAULT_SECURITY_SELINUX if SECURITY_SELINUX
+       default DEFAULT_SECURITY_SMACK if SECURITY_SMACK
+       default DEFAULT_SECURITY_TOMOYO if SECURITY_TOMOYO
+       default DEFAULT_SECURITY_APPARMOR if SECURITY_APPARMOR
+       default DEFAULT_SECURITY_DAC
+
+       help
+         This choice exists only to convert CONFIG_DEFAULT_SECURITY in
+         old kernel configs to CONFIG_LSM in new kernel configs. Don't
+         change this choice unless you are creating a fresh kernel
+         config; it will be ignored once CONFIG_LSM has been set.
+
+         Selects the legacy "major security module" that will be
+         initialized first. Overridden by non-default CONFIG_LSM.
+
+       config DEFAULT_SECURITY_SELINUX
+               bool "SELinux" if SECURITY_SELINUX=y
+
+       config DEFAULT_SECURITY_SMACK
+               bool "Simplified Mandatory Access Control" if SECURITY_SMACK=y
+
+       config DEFAULT_SECURITY_TOMOYO
+               bool "TOMOYO" if SECURITY_TOMOYO=y
+
+       config DEFAULT_SECURITY_APPARMOR
+               bool "AppArmor" if SECURITY_APPARMOR=y
+
+       config DEFAULT_SECURITY_DAC
+               bool "Unix Discretionary Access Controls"
+
+endchoice
+
 config LSM
        string "Ordered list of enabled LSMs"
+       default "yama,loadpin,safesetid,integrity,smack,selinux,tomoyo,apparmor" if DEFAULT_SECURITY_SMACK
+       default "yama,loadpin,safesetid,integrity,apparmor,selinux,smack,tomoyo" if DEFAULT_SECURITY_APPARMOR
+       default "yama,loadpin,safesetid,integrity,tomoyo" if DEFAULT_SECURITY_TOMOYO
+       default "yama,loadpin,safesetid,integrity" if DEFAULT_SECURITY_DAC
        default "yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,apparmor"
        help
          A comma-separated list of LSMs, in initialization order.
index 49d664ddff444810ef9c6e8a1b0276c5ba473c53..87500bde5a92d599ccaa4e892ff12527feac58eb 100644 (file)
@@ -1336,9 +1336,16 @@ module_param_named(path_max, aa_g_path_max, aauint, S_IRUSR);
 bool aa_g_paranoid_load = true;
 module_param_named(paranoid_load, aa_g_paranoid_load, aabool, S_IRUGO);
 
+static int param_get_aaintbool(char *buffer, const struct kernel_param *kp);
+static int param_set_aaintbool(const char *val, const struct kernel_param *kp);
+#define param_check_aaintbool param_check_int
+static const struct kernel_param_ops param_ops_aaintbool = {
+       .set = param_set_aaintbool,
+       .get = param_get_aaintbool
+};
 /* Boot time disable flag */
 static int apparmor_enabled __lsm_ro_after_init = 1;
-module_param_named(enabled, apparmor_enabled, int, 0444);
+module_param_named(enabled, apparmor_enabled, aaintbool, 0444);
 
 static int __init apparmor_enabled_setup(char *str)
 {
@@ -1413,6 +1420,46 @@ static int param_get_aauint(char *buffer, const struct kernel_param *kp)
        return param_get_uint(buffer, kp);
 }
 
+/* Can only be set before AppArmor is initialized (i.e. on boot cmdline). */
+static int param_set_aaintbool(const char *val, const struct kernel_param *kp)
+{
+       struct kernel_param kp_local;
+       bool value;
+       int error;
+
+       if (apparmor_initialized)
+               return -EPERM;
+
+       /* Create local copy, with arg pointing to bool type. */
+       value = !!*((int *)kp->arg);
+       memcpy(&kp_local, kp, sizeof(kp_local));
+       kp_local.arg = &value;
+
+       error = param_set_bool(val, &kp_local);
+       if (!error)
+               *((int *)kp->arg) = *((bool *)kp_local.arg);
+       return error;
+}
+
+/*
+ * To avoid changing /sys/module/apparmor/parameters/enabled from Y/N to
+ * 1/0, this converts the "int that is actually bool" back to bool for
+ * display in the /sys filesystem, while keeping it "int" for the LSM
+ * infrastructure.
+ */
+static int param_get_aaintbool(char *buffer, const struct kernel_param *kp)
+{
+       struct kernel_param kp_local;
+       bool value;
+
+       /* Create local copy, with arg pointing to bool type. */
+       value = !!*((int *)kp->arg);
+       memcpy(&kp_local, kp, sizeof(kp_local));
+       kp_local.arg = &value;
+
+       return param_get_bool(buffer, &kp_local);
+}
+
 static int param_get_audit(char *buffer, const struct kernel_param *kp)
 {
        if (!apparmor_enabled)
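
The aaintbool shim above keeps /sys/module/apparmor/parameters/enabled
reading Y/N while the variable stays an int for the LSM infrastructure.
A hedged userspace sketch of the underlying int-presented-as-bool trick
(the kernel version routes through param_set_bool()/param_get_bool() on
a local copy of the kernel_param):

    #include <stdbool.h>
    #include <stdio.h>

    static int enabled = 1;    /* stored as int, as the LSM core expects */

    /* render the int through a bool-typed local, as the kernel does via
     * kp_local.arg, so the displayed form is Y/N rather than 1/0 */
    static char show_as_bool(const int *arg)
    {
            bool value = !!*arg;

            return value ? 'Y' : 'N';
    }

    int main(void)
    {
            printf("enabled: %c\n", show_as_bool(&enabled));
            return 0;
    }
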
index bcc9c6ead7fd30962cf2ac3755e61d011d62c3da..efdbf17f3915259ea34ed76f5d452a4763236043 100644 (file)
@@ -125,7 +125,7 @@ static int TSS_rawhmac(unsigned char *digest, const unsigned char *key,
  */
 int TSS_authhmac(unsigned char *digest, const unsigned char *key,
                        unsigned int keylen, unsigned char *h1,
-                       unsigned char *h2, unsigned char h3, ...)
+                       unsigned char *h2, unsigned int h3, ...)
 {
        unsigned char paramdigest[SHA1_DIGEST_SIZE];
        struct sdesc *sdesc;
@@ -135,13 +135,16 @@ int TSS_authhmac(unsigned char *digest, const unsigned char *key,
        int ret;
        va_list argp;
 
+       if (!chip)
+               return -ENODEV;
+
        sdesc = init_sdesc(hashalg);
        if (IS_ERR(sdesc)) {
                pr_info("trusted_key: can't alloc %s\n", hash_alg);
                return PTR_ERR(sdesc);
        }
 
-       c = h3;
+       c = !!h3;
        ret = crypto_shash_init(&sdesc->shash);
        if (ret < 0)
                goto out;
@@ -196,6 +199,9 @@ int TSS_checkhmac1(unsigned char *buffer,
        va_list argp;
        int ret;
 
+       if (!chip)
+               return -ENODEV;
+
        bufsize = LOAD32(buffer, TPM_SIZE_OFFSET);
        tag = LOAD16(buffer, 0);
        ordinal = command;
@@ -363,6 +369,9 @@ int trusted_tpm_send(unsigned char *cmd, size_t buflen)
 {
        int rc;
 
+       if (!chip)
+               return -ENODEV;
+
        dump_tpm_buf(cmd);
        rc = tpm_send(chip, cmd, buflen);
        dump_tpm_buf(cmd);
@@ -429,6 +438,9 @@ int oiap(struct tpm_buf *tb, uint32_t *handle, unsigned char *nonce)
 {
        int ret;
 
+       if (!chip)
+               return -ENODEV;
+
        INIT_BUF(tb);
        store16(tb, TPM_TAG_RQU_COMMAND);
        store32(tb, TPM_OIAP_SIZE);
@@ -1245,9 +1257,13 @@ static int __init init_trusted(void)
 {
        int ret;
 
+       /* encrypted_keys.ko depends on successful load of this module even if
+        * TPM is not used.
+        */
        chip = tpm_default_chip();
        if (!chip)
-               return -ENOENT;
+               return 0;
+
        ret = init_digests();
        if (ret < 0)
                goto err_put;
@@ -1269,10 +1285,12 @@ static int __init init_trusted(void)
 
 static void __exit cleanup_trusted(void)
 {
-       put_device(&chip->dev);
-       kfree(digests);
-       trusted_shash_release();
-       unregister_key_type(&key_type_trusted);
+       if (chip) {
+               put_device(&chip->dev);
+               kfree(digests);
+               trusted_shash_release();
+               unregister_key_type(&key_type_trusted);
+       }
 }
 
 late_initcall(init_trusted);
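
Taken together, the trusted.c hunks switch the module to an
optional-hardware model: init succeeds without a TPM so encrypted_keys.ko
still loads, and each entry point fails with -ENODEV instead. A minimal
sketch of the pattern, with the chip type left opaque:

    #include <linux/errno.h>

    struct tpm_chip;                    /* opaque for this sketch */
    static struct tpm_chip *chip;       /* NULL when no TPM is present */

    static int trusted_op(void)
    {
            if (!chip)
                    return -ENODEV;     /* fail the call, not the load */
            /* ... issue the TPM command ... */
            return 0;
    }
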
index 57cc60722dd3855021c56a3e46d900e1f0ad0efe..efac68556b4571e0ebef345935cdba035a13a285 100644 (file)
@@ -206,7 +206,7 @@ static void yama_ptracer_del(struct task_struct *tracer,
  * yama_task_free - check for task_pid to remove from exception list
  * @task: task being removed
  */
-void yama_task_free(struct task_struct *task)
+static void yama_task_free(struct task_struct *task)
 {
        yama_ptracer_del(task, task);
 }
@@ -222,7 +222,7 @@ void yama_task_free(struct task_struct *task)
  * Return 0 on success, -ve on error.  -ENOSYS is returned when Yama
  * does not handle the given option.
  */
-int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
+static int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
                           unsigned long arg4, unsigned long arg5)
 {
        int rc = -ENOSYS;
@@ -401,7 +401,7 @@ static int yama_ptrace_access_check(struct task_struct *child,
  *
  * Returns 0 if following the ptrace is allowed, -ve on error.
  */
-int yama_ptrace_traceme(struct task_struct *parent)
+static int yama_ptrace_traceme(struct task_struct *parent)
 {
        int rc = 0;
 
@@ -452,7 +452,7 @@ static int yama_dointvec_minmax(struct ctl_table *table, int write,
 static int zero;
 static int max_scope = YAMA_SCOPE_NO_ATTACH;
 
-struct ctl_path yama_sysctl_path[] = {
+static struct ctl_path yama_sysctl_path[] = {
        { .procname = "kernel", },
        { .procname = "yama", },
        { }
index d5b0d7ba83c4204db42df492a5e35f54a67c470c..f6ae68017608d83cedc394cd5c11891439e8cbec 100644 (file)
@@ -940,6 +940,28 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
        oss_frame_size = snd_pcm_format_physical_width(params_format(params)) *
                         params_channels(params) / 8;
 
+       err = snd_pcm_oss_period_size(substream, params, sparams);
+       if (err < 0)
+               goto failure;
+
+       n = snd_pcm_plug_slave_size(substream, runtime->oss.period_bytes / oss_frame_size);
+       err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, n, NULL);
+       if (err < 0)
+               goto failure;
+
+       err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIODS,
+                                    runtime->oss.periods, NULL);
+       if (err < 0)
+               goto failure;
+
+       snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
+
+       err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_HW_PARAMS, sparams);
+       if (err < 0) {
+               pcm_dbg(substream->pcm, "HW_PARAMS failed: %i\n", err);
+               goto failure;
+       }
+
 #ifdef CONFIG_SND_PCM_OSS_PLUGINS
        snd_pcm_oss_plugin_clear(substream);
        if (!direct) {
@@ -974,27 +996,6 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
        }
 #endif
 
-       err = snd_pcm_oss_period_size(substream, params, sparams);
-       if (err < 0)
-               goto failure;
-
-       n = snd_pcm_plug_slave_size(substream, runtime->oss.period_bytes / oss_frame_size);
-       err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, n, NULL);
-       if (err < 0)
-               goto failure;
-
-       err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIODS,
-                                    runtime->oss.periods, NULL);
-       if (err < 0)
-               goto failure;
-
-       snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
-
-       if ((err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_HW_PARAMS, sparams)) < 0) {
-               pcm_dbg(substream->pcm, "HW_PARAMS failed: %i\n", err);
-               goto failure;
-       }
-
        if (runtime->oss.trigger) {
                sw_params->start_threshold = 1;
        } else {
index f731f904e8ccb4e9671523e3b68e7825c779d8d8..1d8452912b14af7b211acc8796d1526d936007a2 100644 (file)
@@ -1445,8 +1445,15 @@ static int snd_pcm_pause(struct snd_pcm_substream *substream, int push)
 static int snd_pcm_pre_suspend(struct snd_pcm_substream *substream, int state)
 {
        struct snd_pcm_runtime *runtime = substream->runtime;
-       if (runtime->status->state == SNDRV_PCM_STATE_SUSPENDED)
+       switch (runtime->status->state) {
+       case SNDRV_PCM_STATE_SUSPENDED:
                return -EBUSY;
+       /* unresumable PCM states: return -EBUSY to skip suspend */
+       case SNDRV_PCM_STATE_OPEN:
+       case SNDRV_PCM_STATE_SETUP:
+       case SNDRV_PCM_STATE_DISCONNECTED:
+               return -EBUSY;
+       }
        runtime->trigger_master = substream;
        return 0;
 }
index ee601d7f092694aecd7e853845b4e3f73cd0f261..c0690d1ecd55c1ce33c9bc155abd82d2d0ec9edd 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/mm.h>
+#include <linux/nospec.h>
 #include <sound/rawmidi.h>
 #include <sound/info.h>
 #include <sound/control.h>
@@ -601,6 +602,7 @@ static int __snd_rawmidi_info_select(struct snd_card *card,
                return -ENXIO;
        if (info->stream < 0 || info->stream > 1)
                return -EINVAL;
+       info->stream = array_index_nospec(info->stream, 2);
        pstr = &rmidi->streams[info->stream];
        if (pstr->substream_count == 0)
                return -ENOENT;
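
array_index_nospec() is the usual Spectre-v1 mitigation: after the bounds
check it clamps the index without a branch, so a mispredicted path cannot
speculatively read out of range. A hedged sketch with a hypothetical
table type:

    #include <linux/errno.h>
    #include <linux/nospec.h>

    struct table {
            unsigned long nr_entries;
            int entries[];
    };

    static int table_lookup(const struct table *t, unsigned long idx)
    {
            if (idx >= t->nr_entries)
                    return -EINVAL;
            /* under speculation, idx is forced into [0, nr_entries) */
            idx = array_index_nospec(idx, t->nr_entries);
            return t->entries[idx];
    }
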
index 278ebb9931225998dd07f0606eeabe289d71aff5..c939459172353dee5ee651ee4694f43f2aa9be7d 100644 (file)
@@ -617,13 +617,14 @@ int
 snd_seq_oss_synth_make_info(struct seq_oss_devinfo *dp, int dev, struct synth_info *inf)
 {
        struct seq_oss_synth *rec;
+       struct seq_oss_synthinfo *info = get_synthinfo_nospec(dp, dev);
 
-       if (dev < 0 || dev >= dp->max_synthdev)
+       if (!info)
                return -ENXIO;
 
-       if (dp->synths[dev].is_midi) {
+       if (info->is_midi) {
                struct midi_info minf;
-               snd_seq_oss_midi_make_info(dp, dp->synths[dev].midi_mapped, &minf);
+               snd_seq_oss_midi_make_info(dp, info->midi_mapped, &minf);
                inf->synth_type = SYNTH_TYPE_MIDI;
                inf->synth_subtype = 0;
                inf->nr_voices = 16;
index 7d4640d1fe9fb8a8ab8eecf045798497cb3e38f3..38e7deab638479ef9525c67fcd4f0fe37101cfb2 100644 (file)
@@ -1252,7 +1252,7 @@ static int snd_seq_ioctl_set_client_info(struct snd_seq_client *client,
 
        /* fill the info fields */
        if (client_info->name[0])
-               strlcpy(client->name, client_info->name, sizeof(client->name));
+               strscpy(client->name, client_info->name, sizeof(client->name));
 
        client->filter = client_info->filter;
        client->event_lost = client_info->event_lost;
@@ -1530,7 +1530,7 @@ static int snd_seq_ioctl_create_queue(struct snd_seq_client *client, void *arg)
        /* set queue name */
        if (!info->name[0])
                snprintf(info->name, sizeof(info->name), "Queue-%d", q->queue);
-       strlcpy(q->name, info->name, sizeof(q->name));
+       strscpy(q->name, info->name, sizeof(q->name));
        snd_use_lock_free(&q->use_lock);
 
        return 0;
@@ -1592,7 +1592,7 @@ static int snd_seq_ioctl_set_queue_info(struct snd_seq_client *client,
                queuefree(q);
                return -EPERM;
        }
-       strlcpy(q->name, info->name, sizeof(q->name));
+       strscpy(q->name, info->name, sizeof(q->name));
        queuefree(q);
 
        return 0;
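
The strlcpy() to strscpy() swaps are the now-standard hardening:
strscpy() never scans past the destination size in the source and
returns -E2BIG on truncation rather than the would-be length. Hedged
helper sketch:

    #include <linux/errno.h>
    #include <linux/string.h>
    #include <linux/types.h>

    /*
     * True when src fit, false on truncation; dst is always
     * NUL-terminated. Unlike strlcpy(), strscpy() does not walk the
     * whole source string just to compute a return value.
     */
    static bool copy_name(char *dst, size_t dstsz, const char *src)
    {
            return strscpy(dst, src, dstsz) != -E2BIG;
    }
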
index 9c37d9af3023f67bdba2ba4a3d207580cd1f70c5..ec7715c6b0c02c9bc940ed4b16f387b509bd1907 100644 (file)
@@ -107,7 +107,6 @@ int snd_hdac_ext_bus_init(struct hdac_bus *bus, struct device *dev,
        INIT_LIST_HEAD(&bus->hlink_list);
        bus->idx = idx++;
 
-       mutex_init(&bus->lock);
        bus->cmd_dma_state = true;
 
        return 0;
index 012305177f68227af7bb25890a92c1ef93690221..ad8eee08013fb838e228daaa23cd974a92c34325 100644 (file)
@@ -38,6 +38,7 @@ int snd_hdac_bus_init(struct hdac_bus *bus, struct device *dev,
        INIT_WORK(&bus->unsol_work, snd_hdac_bus_process_unsol_events);
        spin_lock_init(&bus->reg_lock);
        mutex_init(&bus->cmd_mutex);
+       mutex_init(&bus->lock);
        bus->irq = -1;
        return 0;
 }
index 5c95933e739a43bc5cd30829e43c0381cff1e989..1ea51e3b942a034a1b487bb2ad7dc054893a4d39 100644 (file)
@@ -69,13 +69,15 @@ void snd_hdac_display_power(struct hdac_bus *bus, unsigned int idx, bool enable)
 
        dev_dbg(bus->dev, "display power %s\n",
                enable ? "enable" : "disable");
+
+       mutex_lock(&bus->lock);
        if (enable)
                set_bit(idx, &bus->display_power_status);
        else
                clear_bit(idx, &bus->display_power_status);
 
        if (!acomp || !acomp->ops)
-               return;
+               goto unlock;
 
        if (bus->display_power_status) {
                if (!bus->display_power_active) {
@@ -92,6 +94,8 @@ void snd_hdac_display_power(struct hdac_bus *bus, unsigned int idx, bool enable)
                        bus->display_power_active = false;
                }
        }
+ unlock:
+       mutex_unlock(&bus->lock);
 }
 EXPORT_SYMBOL_GPL(snd_hdac_display_power);
 
index ece256a3b48f3b9108615931d8727c86d0d0ded5..2ec91085fa3e7708d27a605747213f2277b9bc2b 100644 (file)
@@ -2142,6 +2142,8 @@ static struct snd_pci_quirk power_save_blacklist[] = {
        SND_PCI_QUIRK(0x8086, 0x2040, "Intel DZ77BH-55K", 0),
        /* https://bugzilla.kernel.org/show_bug.cgi?id=199607 */
        SND_PCI_QUIRK(0x8086, 0x2057, "Intel NUC5i7RYB", 0),
+       /* https://bugs.launchpad.net/bugs/1821663 */
+       SND_PCI_QUIRK(0x8086, 0x2064, "Intel SDP 8086:2064", 0),
        /* https://bugzilla.redhat.com/show_bug.cgi?id=1520902 */
        SND_PCI_QUIRK(0x8086, 0x2068, "Intel NUC7i3BNB", 0),
        /* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */
@@ -2150,6 +2152,8 @@ static struct snd_pci_quirk power_save_blacklist[] = {
        SND_PCI_QUIRK(0x17aa, 0x367b, "Lenovo IdeaCentre B550", 0),
        /* https://bugzilla.redhat.com/show_bug.cgi?id=1572975 */
        SND_PCI_QUIRK(0x17aa, 0x36a7, "Lenovo C50 All in one", 0),
+       /* https://bugs.launchpad.net/bugs/1821663 */
+       SND_PCI_QUIRK(0x1631, 0xe017, "Packard Bell NEC IMEDIA 5204", 0),
        {}
 };
 #endif /* CONFIG_PM */
index 29882bda763289374069ec4777e62b781b416673..e1ebc6d5f38226b10f689b2bc04fd0504331ced2 100644 (file)
@@ -1005,7 +1005,6 @@ struct ca0132_spec {
        unsigned int scp_resp_header;
        unsigned int scp_resp_data[4];
        unsigned int scp_resp_count;
-       bool alt_firmware_present;
        bool startup_check_entered;
        bool dsp_reload;
 
@@ -7518,7 +7517,7 @@ static bool ca0132_download_dsp_images(struct hda_codec *codec)
        bool dsp_loaded = false;
        struct ca0132_spec *spec = codec->spec;
        const struct dsp_image_seg *dsp_os_image;
-       const struct firmware *fw_entry;
+       const struct firmware *fw_entry = NULL;
        /*
         * Alternate firmwares for different variants. The Recon3Di apparently
         * can use the default firmware, but I'll leave the option in case
@@ -7529,33 +7528,26 @@ static bool ca0132_download_dsp_images(struct hda_codec *codec)
        case QUIRK_R3D:
        case QUIRK_AE5:
                if (request_firmware(&fw_entry, DESKTOP_EFX_FILE,
-                                       codec->card->dev) != 0) {
+                                       codec->card->dev) != 0)
                        codec_dbg(codec, "Desktop firmware not found.");
-                       spec->alt_firmware_present = false;
-               } else {
+               else
                        codec_dbg(codec, "Desktop firmware selected.");
-                       spec->alt_firmware_present = true;
-               }
                break;
        case QUIRK_R3DI:
                if (request_firmware(&fw_entry, R3DI_EFX_FILE,
-                                       codec->card->dev) != 0) {
+                                       codec->card->dev) != 0)
                        codec_dbg(codec, "Recon3Di alt firmware not detected.");
-                       spec->alt_firmware_present = false;
-               } else {
+               else
                        codec_dbg(codec, "Recon3Di firmware selected.");
-                       spec->alt_firmware_present = true;
-               }
                break;
        default:
-               spec->alt_firmware_present = false;
                break;
        }
        /*
         * Use default ctefx.bin if no alt firmware is detected, or if none
         * exists for your particular codec.
         */
-       if (!spec->alt_firmware_present) {
+       if (!fw_entry) {
                codec_dbg(codec, "Default firmware selected.");
                if (request_firmware(&fw_entry, EFX_FILE,
                                        codec->card->dev) != 0)
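
Dropping alt_firmware_present works because request_firmware() leaves the
handle NULL on failure, so fw_entry itself already encodes "alt firmware
found". A hedged sketch of the fallback pattern (firmware names
hypothetical):

    #include <linux/device.h>
    #include <linux/firmware.h>

    static int load_fw(struct device *dev, const struct firmware **fw)
    {
            /* *fw stays NULL when the alt image is missing */
            if (request_firmware(fw, "alt.bin", dev) != 0)
                    return request_firmware(fw, "default.bin", dev);
            return 0;
    }
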
index 191830d4fa4009c4ddd8856a2c95f7f9103375e7..810479766090376fbb2f8ecdb6da6012fb4058a3 100644 (file)
@@ -1864,8 +1864,8 @@ enum {
        ALC887_FIXUP_BASS_CHMAP,
        ALC1220_FIXUP_GB_DUAL_CODECS,
        ALC1220_FIXUP_CLEVO_P950,
-       ALC1220_FIXUP_SYSTEM76_ORYP5,
-       ALC1220_FIXUP_SYSTEM76_ORYP5_PINS,
+       ALC1220_FIXUP_CLEVO_PB51ED,
+       ALC1220_FIXUP_CLEVO_PB51ED_PINS,
 };
 
 static void alc889_fixup_coef(struct hda_codec *codec,
@@ -2070,7 +2070,7 @@ static void alc1220_fixup_clevo_p950(struct hda_codec *codec,
 static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec,
                                const struct hda_fixup *fix, int action);
 
-static void alc1220_fixup_system76_oryp5(struct hda_codec *codec,
+static void alc1220_fixup_clevo_pb51ed(struct hda_codec *codec,
                                     const struct hda_fixup *fix,
                                     int action)
 {
@@ -2322,18 +2322,18 @@ static const struct hda_fixup alc882_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc1220_fixup_clevo_p950,
        },
-       [ALC1220_FIXUP_SYSTEM76_ORYP5] = {
+       [ALC1220_FIXUP_CLEVO_PB51ED] = {
                .type = HDA_FIXUP_FUNC,
-               .v.func = alc1220_fixup_system76_oryp5,
+               .v.func = alc1220_fixup_clevo_pb51ed,
        },
-       [ALC1220_FIXUP_SYSTEM76_ORYP5_PINS] = {
+       [ALC1220_FIXUP_CLEVO_PB51ED_PINS] = {
                .type = HDA_FIXUP_PINS,
                .v.pins = (const struct hda_pintbl[]) {
                        { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */
                        {}
                },
                .chained = true,
-               .chain_id = ALC1220_FIXUP_SYSTEM76_ORYP5,
+               .chain_id = ALC1220_FIXUP_CLEVO_PB51ED,
        },
 };
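
The ORYP5 to PB51ED renames generalize the quirk to the Clevo barebone
rather than one vendor's branding; the chaining mechanics are unchanged.
As an illustration of how .chained/.chain_id resolve (a hypothetical
walker over the driver's internal types; the real logic lives in the HDA
fixup core):

    /* hypothetical walker over the driver's internal fixup types */
    static void apply_fixup_chain(struct hda_codec *codec,
                                  const struct hda_fixup *table,
                                  int id, int action)
    {
            int depth = 0;

            while (id >= 0 && depth++ < 10) {   /* guard against loops */
                    const struct hda_fixup *fix = &table[id];

                    if (fix->type == HDA_FIXUP_PINS && fix->v.pins)
                            snd_hda_apply_pincfgs(codec, fix->v.pins);
                    else if (fix->type == HDA_FIXUP_FUNC && fix->v.func)
                            fix->v.func(codec, fix, action);

                    id = fix->chained ? fix->chain_id : -1;
            }
    }
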
 
@@ -2411,8 +2411,9 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950),
        SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950),
        SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950),
-       SND_PCI_QUIRK(0x1558, 0x96e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS),
-       SND_PCI_QUIRK(0x1558, 0x97e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS),
+       SND_PCI_QUIRK(0x1558, 0x96e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+       SND_PCI_QUIRK(0x1558, 0x97e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+       SND_PCI_QUIRK(0x1558, 0x65d1, "Tuxedo Book XC1509", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
        SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
        SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
        SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530),
@@ -5491,7 +5492,7 @@ static void alc_headset_btn_callback(struct hda_codec *codec,
        jack->jack->button_state = report;
 }
 
-static void alc295_fixup_chromebook(struct hda_codec *codec,
+static void alc_fixup_headset_jack(struct hda_codec *codec,
                                    const struct hda_fixup *fix, int action)
 {
 
@@ -5501,16 +5502,6 @@ static void alc295_fixup_chromebook(struct hda_codec *codec,
                                                    alc_headset_btn_callback);
                snd_hda_jack_add_kctl(codec, 0x55, "Headset Jack", false,
                                      SND_JACK_HEADSET, alc_headset_btn_keymap);
-               switch (codec->core.vendor_id) {
-               case 0x10ec0295:
-                       alc_update_coef_idx(codec, 0x4a, 0x8000, 1 << 15); /* Reset HP JD */
-                       alc_update_coef_idx(codec, 0x4a, 0x8000, 0 << 15);
-                       break;
-               case 0x10ec0236:
-                       alc_update_coef_idx(codec, 0x1b, 0x8000, 1 << 15); /* Reset HP JD */
-                       alc_update_coef_idx(codec, 0x1b, 0x8000, 0 << 15);
-                       break;
-               }
                break;
        case HDA_FIXUP_ACT_INIT:
                switch (codec->core.vendor_id) {
@@ -5531,6 +5522,25 @@ static void alc295_fixup_chromebook(struct hda_codec *codec,
        }
 }
 
+static void alc295_fixup_chromebook(struct hda_codec *codec,
+                                   const struct hda_fixup *fix, int action)
+{
+       switch (action) {
+       case HDA_FIXUP_ACT_INIT:
+               switch (codec->core.vendor_id) {
+               case 0x10ec0295:
+                       alc_update_coef_idx(codec, 0x4a, 0x8000, 1 << 15); /* Reset HP JD */
+                       alc_update_coef_idx(codec, 0x4a, 0x8000, 0 << 15);
+                       break;
+               case 0x10ec0236:
+                       alc_update_coef_idx(codec, 0x1b, 0x8000, 1 << 15); /* Reset HP JD */
+                       alc_update_coef_idx(codec, 0x1b, 0x8000, 0 << 15);
+                       break;
+               }
+               break;
+       }
+}
+
 static void alc_fixup_disable_mic_vref(struct hda_codec *codec,
                                  const struct hda_fixup *fix, int action)
 {
@@ -5663,6 +5673,7 @@ enum {
        ALC233_FIXUP_ASUS_MIC_NO_PRESENCE,
        ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE,
        ALC233_FIXUP_LENOVO_MULTI_CODECS,
+       ALC233_FIXUP_ACER_HEADSET_MIC,
        ALC294_FIXUP_LENOVO_MIC_LOCATION,
        ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE,
        ALC700_FIXUP_INTEL_REFERENCE,
@@ -5684,10 +5695,13 @@ enum {
        ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
        ALC255_FIXUP_ACER_HEADSET_MIC,
        ALC295_FIXUP_CHROME_BOOK,
+       ALC225_FIXUP_HEADSET_JACK,
        ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE,
        ALC225_FIXUP_WYSE_AUTO_MUTE,
        ALC225_FIXUP_WYSE_DISABLE_MIC_VREF,
        ALC286_FIXUP_ACER_AIO_HEADSET_MIC,
+       ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
+       ALC299_FIXUP_PREDATOR_SPK,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -6488,6 +6502,16 @@ static const struct hda_fixup alc269_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc233_alc662_fixup_lenovo_dual_codecs,
        },
+       [ALC233_FIXUP_ACER_HEADSET_MIC] = {
+               .type = HDA_FIXUP_VERBS,
+               .v.verbs = (const struct hda_verb[]) {
+                       { 0x20, AC_VERB_SET_COEF_INDEX, 0x45 },
+                       { 0x20, AC_VERB_SET_PROC_COEF, 0x5089 },
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC233_FIXUP_ASUS_MIC_NO_PRESENCE
+       },
        [ALC294_FIXUP_LENOVO_MIC_LOCATION] = {
                .type = HDA_FIXUP_PINS,
                .v.pins = (const struct hda_pintbl[]) {
@@ -6633,6 +6657,12 @@ static const struct hda_fixup alc269_fixups[] = {
        [ALC295_FIXUP_CHROME_BOOK] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc295_fixup_chromebook,
+               .chained = true,
+               .chain_id = ALC225_FIXUP_HEADSET_JACK
+       },
+       [ALC225_FIXUP_HEADSET_JACK] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc_fixup_headset_jack,
        },
        [ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE] = {
                .type = HDA_FIXUP_PINS,
@@ -6696,6 +6726,22 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE
        },
+       [ALC256_FIXUP_ASUS_MIC_NO_PRESENCE] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x19, 0x04a11120 }, /* use as headset mic, without its own jack detect */
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
+       },
+       [ALC299_FIXUP_PREDATOR_SPK] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x21, 0x90170150 }, /* use as internal speaker */
+                       { }
+               }
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -6712,9 +6758,14 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
        SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK),
+       SND_PCI_QUIRK(0x1025, 0x1099, "Acer Aspire E5-523G", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1025, 0x110e, "Acer Aspire ES1-432", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1025, 0x1246, "Acer Predator Helios 500", ALC299_FIXUP_PREDATOR_SPK),
        SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
+       SND_PCI_QUIRK(0x1025, 0x1308, "Acer Aspire Z24-890", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
+       SND_PCI_QUIRK(0x1025, 0x132a, "Acer TravelMate B114-21", ALC233_FIXUP_ACER_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC),
        SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
        SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS),
@@ -7110,7 +7161,9 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
        {.id = ALC255_FIXUP_DUMMY_LINEOUT_VERB, .name = "alc255-dummy-lineout"},
        {.id = ALC255_FIXUP_DELL_HEADSET_MIC, .name = "alc255-dell-headset"},
        {.id = ALC295_FIXUP_HP_X360, .name = "alc295-hp-x360"},
-       {.id = ALC295_FIXUP_CHROME_BOOK, .name = "alc-sense-combo"},
+       {.id = ALC225_FIXUP_HEADSET_JACK, .name = "alc-headset-jack"},
+       {.id = ALC295_FIXUP_CHROME_BOOK, .name = "alc-chrome-book"},
+       {.id = ALC299_FIXUP_PREDATOR_SPK, .name = "predator-spk"},
        {}
 };
 #define ALC225_STANDARD_PINS \
@@ -7331,6 +7384,18 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x14, 0x90170110},
                {0x1b, 0x90a70130},
                {0x21, 0x03211020}),
+       SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
+               {0x12, 0x90a60130},
+               {0x14, 0x90170110},
+               {0x21, 0x03211020}),
+       SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
+               {0x12, 0x90a60130},
+               {0x14, 0x90170110},
+               {0x21, 0x04211020}),
+       SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
+               {0x1a, 0x90a70130},
+               {0x1b, 0x90170110},
+               {0x21, 0x03211020}),
        SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
                {0x12, 0xb7a60130},
                {0x13, 0xb8a61140},
index 419114edfd57db8f341184148fcf4d7ab7d67a0a..667fc1d59e189f599e580654c10719b96b7e74a0 100644 (file)
@@ -1151,6 +1151,7 @@ config SND_SOC_WCD9335
        tristate "WCD9335 Codec"
        depends on SLIMBUS
        select REGMAP_SLIMBUS
+       select REGMAP_IRQ
        help
          The WCD9335 is a standalone Hi-Fi audio CODEC IC, supports
          Qualcomm Technologies, Inc. (QTI) multimedia solutions,
index 03bbbcd3b6c115254a75f367c40447f012edc8c4..87616b126018b3cb9b9a5c8f4ca7a3af51b97264 100644 (file)
@@ -2129,6 +2129,7 @@ static int ab8500_codec_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
                dev_err(dai->component->dev,
                        "%s: ERROR: The device is either a master or a slave.\n",
                        __func__);
+               /* fall through */
        default:
                dev_err(dai->component->dev,
                        "%s: ERROR: Unsupporter master mask 0x%x\n",
index 9f4a59871cee72b2011acf20d8167e12933ec2fc..c71696146c5ec17e17751903a592038c4f8347e4 100644 (file)
@@ -1635,6 +1635,16 @@ static int cs35l35_i2c_probe(struct i2c_client *i2c_client,
        return ret;
 }
 
+static int cs35l35_i2c_remove(struct i2c_client *i2c_client)
+{
+       struct cs35l35_private *cs35l35 = i2c_get_clientdata(i2c_client);
+
+       regulator_bulk_disable(cs35l35->num_supplies, cs35l35->supplies);
+       gpiod_set_value_cansleep(cs35l35->reset_gpio, 0);
+
+       return 0;
+}
+
 static const struct of_device_id cs35l35_of_match[] = {
        {.compatible = "cirrus,cs35l35"},
        {},
@@ -1655,6 +1665,7 @@ static struct i2c_driver cs35l35_i2c_driver = {
        },
        .id_table = cs35l35_id,
        .probe = cs35l35_i2c_probe,
+       .remove = cs35l35_i2c_remove,
 };
 
 module_i2c_driver(cs35l35_i2c_driver);
index 33d74f163bd753820bb77bd81d8f0a28461410e5..793a14d586672bc2b76b143b5756206becce28a8 100644 (file)
@@ -642,6 +642,7 @@ static const struct regmap_config cs4270_regmap = {
        .reg_defaults =         cs4270_reg_defaults,
        .num_reg_defaults =     ARRAY_SIZE(cs4270_reg_defaults),
        .cache_type =           REGCACHE_RBTREE,
+       .write_flag_mask =      CS4270_I2C_INCR,
 
        .readable_reg =         cs4270_reg_is_readable,
        .volatile_reg =         cs4270_reg_is_volatile,
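
write_flag_mask is OR-ed into the register address on every regmap write;
for the CS4270 it sets the I2C INCR bit so multi-register writes
auto-increment. A minimal hedged config sketch:

    #include <linux/regmap.h>

    /* hypothetical chip whose writes must set bit 7 of the address */
    static const struct regmap_config example_regmap = {
            .reg_bits        = 8,
            .val_bits        = 8,
            .write_flag_mask = 0x80,
    };
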
index ffecdaaa8cf2bb2e69bde2de7e2acb92cbdec0a1..f889d94c8e3cf707f0bcab6cdb7f860851d7042f 100644 (file)
@@ -38,6 +38,9 @@ static void hdac_hda_dai_close(struct snd_pcm_substream *substream,
                               struct snd_soc_dai *dai);
 static int hdac_hda_dai_prepare(struct snd_pcm_substream *substream,
                                struct snd_soc_dai *dai);
+static int hdac_hda_dai_hw_params(struct snd_pcm_substream *substream,
+                                 struct snd_pcm_hw_params *params,
+                                 struct snd_soc_dai *dai);
 static int hdac_hda_dai_hw_free(struct snd_pcm_substream *substream,
                                struct snd_soc_dai *dai);
 static int hdac_hda_dai_set_tdm_slot(struct snd_soc_dai *dai,
@@ -50,6 +53,7 @@ static const struct snd_soc_dai_ops hdac_hda_dai_ops = {
        .startup = hdac_hda_dai_open,
        .shutdown = hdac_hda_dai_close,
        .prepare = hdac_hda_dai_prepare,
+       .hw_params = hdac_hda_dai_hw_params,
        .hw_free = hdac_hda_dai_hw_free,
        .set_tdm_slot = hdac_hda_dai_set_tdm_slot,
 };
@@ -139,6 +143,39 @@ static int hdac_hda_dai_set_tdm_slot(struct snd_soc_dai *dai,
        return 0;
 }
 
+static int hdac_hda_dai_hw_params(struct snd_pcm_substream *substream,
+                                 struct snd_pcm_hw_params *params,
+                                 struct snd_soc_dai *dai)
+{
+       struct snd_soc_component *component = dai->component;
+       struct hdac_hda_priv *hda_pvt;
+       unsigned int format_val;
+       unsigned int maxbps;
+
+       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+               maxbps = dai->driver->playback.sig_bits;
+       else
+               maxbps = dai->driver->capture.sig_bits;
+
+       hda_pvt = snd_soc_component_get_drvdata(component);
+       format_val = snd_hdac_calc_stream_format(params_rate(params),
+                                                params_channels(params),
+                                                params_format(params),
+                                                maxbps,
+                                                0);
+       if (!format_val) {
+               dev_err(dai->dev,
+                       "invalid format_val, rate=%d, ch=%d, format=%d, maxbps=%d\n",
+                       params_rate(params), params_channels(params),
+                       params_format(params), maxbps);
+
+               return -EINVAL;
+       }
+
+       hda_pvt->pcm[dai->id].format_val[substream->stream] = format_val;
+       return 0;
+}
+
 static int hdac_hda_dai_hw_free(struct snd_pcm_substream *substream,
                                struct snd_soc_dai *dai)
 {
@@ -162,10 +199,9 @@ static int hdac_hda_dai_prepare(struct snd_pcm_substream *substream,
                                struct snd_soc_dai *dai)
 {
        struct snd_soc_component *component = dai->component;
+       struct hda_pcm_stream *hda_stream;
        struct hdac_hda_priv *hda_pvt;
-       struct snd_pcm_runtime *runtime = substream->runtime;
        struct hdac_device *hdev;
-       struct hda_pcm_stream *hda_stream;
        unsigned int format_val;
        struct hda_pcm *pcm;
        unsigned int stream;
@@ -179,19 +215,8 @@ static int hdac_hda_dai_prepare(struct snd_pcm_substream *substream,
 
        hda_stream = &pcm->stream[substream->stream];
 
-       format_val = snd_hdac_calc_stream_format(runtime->rate,
-                                                runtime->channels,
-                                                runtime->format,
-                                                hda_stream->maxbps,
-                                                0);
-       if (!format_val) {
-               dev_err(&hdev->dev,
-                       "invalid format_val, rate=%d, ch=%d, format=%d\n",
-                       runtime->rate, runtime->channels, runtime->format);
-               return -EINVAL;
-       }
-
        stream = hda_pvt->pcm[dai->id].stream_tag[substream->stream];
+       format_val = hda_pvt->pcm[dai->id].format_val[substream->stream];
 
        ret = snd_hda_codec_prepare(&hda_pvt->codec, hda_stream,
                                    stream, format_val, substream);
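
Computing format_val in hw_params (from the negotiated params and the
DAI's sig_bits cap) and only looking it up in prepare is the point of
this patch. snd_hdac_calc_stream_format() packs rate/channels/format into
the SDxFMT register encoding and returns 0 for combinations it cannot
express; hedged usage sketch:

    #include <linux/errno.h>
    #include <sound/hdaudio.h>
    #include <sound/pcm.h>

    /* hypothetical helper: SDxFMT encoding for 48 kHz stereo S16 */
    static int stereo_s16_format(void)
    {
            unsigned int fmt;

            fmt = snd_hdac_calc_stream_format(48000, 2,
                                              SNDRV_PCM_FORMAT_S16_LE,
                                              16, 0);
            return fmt ? (int)fmt : -EINVAL;
    }
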
index e444ef5933606ce8689f06ba187f3e84491d6336..6b1bd4f428e70ed4037adef636ef9c42878d2865 100644 (file)
@@ -8,6 +8,7 @@
 
 struct hdac_hda_pcm {
        int stream_tag[2];
+       unsigned int format_val[2];
 };
 
 struct hdac_hda_priv {
index e5b6769b9797724ceef38f3b8132200876f227e8..35df73e42cbc5f9d7bf6af4aa954bde49e8565e2 100644 (file)
@@ -484,9 +484,6 @@ static int hdmi_codec_hw_params(struct snd_pcm_substream *substream,
                params_width(params), params_rate(params),
                params_channels(params));
 
-       if (params_width(params) > 24)
-               params->msbits = 24;
-
        ret = snd_pcm_create_iec958_consumer_hw_params(params, hp.iec.status,
                                                       sizeof(hp.iec.status));
        if (ret < 0) {
@@ -529,73 +526,71 @@ static int hdmi_codec_set_fmt(struct snd_soc_dai *dai,
 {
        struct hdmi_codec_priv *hcp = snd_soc_dai_get_drvdata(dai);
        struct hdmi_codec_daifmt cf = { 0 };
-       int ret = 0;
 
        dev_dbg(dai->dev, "%s()\n", __func__);
 
-       if (dai->id == DAI_ID_SPDIF) {
-               cf.fmt = HDMI_SPDIF;
-       } else {
-               switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
-               case SND_SOC_DAIFMT_CBM_CFM:
-                       cf.bit_clk_master = 1;
-                       cf.frame_clk_master = 1;
-                       break;
-               case SND_SOC_DAIFMT_CBS_CFM:
-                       cf.frame_clk_master = 1;
-                       break;
-               case SND_SOC_DAIFMT_CBM_CFS:
-                       cf.bit_clk_master = 1;
-                       break;
-               case SND_SOC_DAIFMT_CBS_CFS:
-                       break;
-               default:
-                       return -EINVAL;
-               }
+       if (dai->id == DAI_ID_SPDIF)
+               return 0;
+
+       switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+       case SND_SOC_DAIFMT_CBM_CFM:
+               cf.bit_clk_master = 1;
+               cf.frame_clk_master = 1;
+               break;
+       case SND_SOC_DAIFMT_CBS_CFM:
+               cf.frame_clk_master = 1;
+               break;
+       case SND_SOC_DAIFMT_CBM_CFS:
+               cf.bit_clk_master = 1;
+               break;
+       case SND_SOC_DAIFMT_CBS_CFS:
+               break;
+       default:
+               return -EINVAL;
+       }
 
-               switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
-               case SND_SOC_DAIFMT_NB_NF:
-                       break;
-               case SND_SOC_DAIFMT_NB_IF:
-                       cf.frame_clk_inv = 1;
-                       break;
-               case SND_SOC_DAIFMT_IB_NF:
-                       cf.bit_clk_inv = 1;
-                       break;
-               case SND_SOC_DAIFMT_IB_IF:
-                       cf.frame_clk_inv = 1;
-                       cf.bit_clk_inv = 1;
-                       break;
-               }
+       switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+       case SND_SOC_DAIFMT_NB_NF:
+               break;
+       case SND_SOC_DAIFMT_NB_IF:
+               cf.frame_clk_inv = 1;
+               break;
+       case SND_SOC_DAIFMT_IB_NF:
+               cf.bit_clk_inv = 1;
+               break;
+       case SND_SOC_DAIFMT_IB_IF:
+               cf.frame_clk_inv = 1;
+               cf.bit_clk_inv = 1;
+               break;
+       }
 
-               switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
-               case SND_SOC_DAIFMT_I2S:
-                       cf.fmt = HDMI_I2S;
-                       break;
-               case SND_SOC_DAIFMT_DSP_A:
-                       cf.fmt = HDMI_DSP_A;
-                       break;
-               case SND_SOC_DAIFMT_DSP_B:
-                       cf.fmt = HDMI_DSP_B;
-                       break;
-               case SND_SOC_DAIFMT_RIGHT_J:
-                       cf.fmt = HDMI_RIGHT_J;
-                       break;
-               case SND_SOC_DAIFMT_LEFT_J:
-                       cf.fmt = HDMI_LEFT_J;
-                       break;
-               case SND_SOC_DAIFMT_AC97:
-                       cf.fmt = HDMI_AC97;
-                       break;
-               default:
-                       dev_err(dai->dev, "Invalid DAI interface format\n");
-                       return -EINVAL;
-               }
+       switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+       case SND_SOC_DAIFMT_I2S:
+               cf.fmt = HDMI_I2S;
+               break;
+       case SND_SOC_DAIFMT_DSP_A:
+               cf.fmt = HDMI_DSP_A;
+               break;
+       case SND_SOC_DAIFMT_DSP_B:
+               cf.fmt = HDMI_DSP_B;
+               break;
+       case SND_SOC_DAIFMT_RIGHT_J:
+               cf.fmt = HDMI_RIGHT_J;
+               break;
+       case SND_SOC_DAIFMT_LEFT_J:
+               cf.fmt = HDMI_LEFT_J;
+               break;
+       case SND_SOC_DAIFMT_AC97:
+               cf.fmt = HDMI_AC97;
+               break;
+       default:
+               dev_err(dai->dev, "Invalid DAI interface format\n");
+               return -EINVAL;
        }
 
        hcp->daifmt[dai->id] = cf;
 
-       return ret;
+       return 0;
 }
 
 static int hdmi_codec_digital_mute(struct snd_soc_dai *dai, int mute)
@@ -792,8 +787,10 @@ static int hdmi_codec_probe(struct platform_device *pdev)
                i++;
        }
 
-       if (hcd->spdif)
+       if (hcd->spdif) {
                hcp->daidrv[i] = hdmi_spdif_dai;
+               hcp->daifmt[DAI_ID_SPDIF].fmt = HDMI_SPDIF;
+       }
 
        dev_set_drvdata(dev, hcp);
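
With the SPDIF daifmt now set once at probe, set_fmt can return early for
that DAI and the three switch statements handle only the I2S-style DAI at
one less indent level. The shape of the refactor, sketched (DAI_ID_SPDIF
as defined in hdmi-codec.c):

    #include <sound/soc.h>

    /* sketch only: special case first, common path un-nested */
    static int example_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
    {
            if (dai->id == DAI_ID_SPDIF)
                    return 0;       /* daifmt was fixed at probe time */

            /* ... parse master/inversion/format bits of fmt ... */
            return 0;
    }
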
 
index bfd74b86c9d2f43b8e19d8bd4c211c6d2f3cd887..645aa07941237d13cbebdf0d4bf17130f9dae1ae 100644 (file)
@@ -411,9 +411,9 @@ static const struct snd_soc_dapm_widget nau8810_dapm_widgets[] = {
        SND_SOC_DAPM_MIXER("Mono Mixer", NAU8810_REG_POWER3,
                NAU8810_MOUTMX_EN_SFT, 0, &nau8810_mono_mixer_controls[0],
                ARRAY_SIZE(nau8810_mono_mixer_controls)),
-       SND_SOC_DAPM_DAC("DAC", "HiFi Playback", NAU8810_REG_POWER3,
+       SND_SOC_DAPM_DAC("DAC", "Playback", NAU8810_REG_POWER3,
                NAU8810_DAC_EN_SFT, 0),
-       SND_SOC_DAPM_ADC("ADC", "HiFi Capture", NAU8810_REG_POWER2,
+       SND_SOC_DAPM_ADC("ADC", "Capture", NAU8810_REG_POWER2,
                NAU8810_ADC_EN_SFT, 0),
        SND_SOC_DAPM_PGA("SpkN Out", NAU8810_REG_POWER3,
                NAU8810_NSPK_EN_SFT, 0, NULL, 0),
index 87ed3dc496dc2de72043c8dbd5c66cbed7c0f923..5ab05e75edeac61219945488a455eafdc8f785f1 100644 (file)
@@ -681,8 +681,8 @@ static const struct snd_soc_dapm_widget nau8824_dapm_widgets[] = {
        SND_SOC_DAPM_ADC("ADCR", NULL, NAU8824_REG_ANALOG_ADC_2,
                NAU8824_ADCR_EN_SFT, 0),
 
-       SND_SOC_DAPM_AIF_OUT("AIFTX", "HiFi Capture", 0, SND_SOC_NOPM, 0, 0),
-       SND_SOC_DAPM_AIF_IN("AIFRX", "HiFi Playback", 0, SND_SOC_NOPM, 0, 0),
+       SND_SOC_DAPM_AIF_OUT("AIFTX", "Capture", 0, SND_SOC_NOPM, 0, 0),
+       SND_SOC_DAPM_AIF_IN("AIFRX", "Playback", 0, SND_SOC_NOPM, 0, 0),
 
        SND_SOC_DAPM_DAC("DACL", NULL, NAU8824_REG_RDAC,
                NAU8824_DACL_EN_SFT, 0),
@@ -831,6 +831,36 @@ static void nau8824_int_status_clear_all(struct regmap *regmap)
        }
 }
 
+static void nau8824_dapm_disable_pin(struct nau8824 *nau8824, const char *pin)
+{
+       struct snd_soc_dapm_context *dapm = nau8824->dapm;
+       const char *prefix = dapm->component->name_prefix;
+       char prefixed_pin[80];
+
+       if (prefix) {
+               snprintf(prefixed_pin, sizeof(prefixed_pin), "%s %s",
+                        prefix, pin);
+               snd_soc_dapm_disable_pin(dapm, prefixed_pin);
+       } else {
+               snd_soc_dapm_disable_pin(dapm, pin);
+       }
+}
+
+static void nau8824_dapm_enable_pin(struct nau8824 *nau8824, const char *pin)
+{
+       struct snd_soc_dapm_context *dapm = nau8824->dapm;
+       const char *prefix = dapm->component->name_prefix;
+       char prefixed_pin[80];
+
+       if (prefix) {
+               snprintf(prefixed_pin, sizeof(prefixed_pin), "%s %s",
+                        prefix, pin);
+               snd_soc_dapm_force_enable_pin(dapm, prefixed_pin);
+       } else {
+               snd_soc_dapm_force_enable_pin(dapm, pin);
+       }
+}
+
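
These wrappers matter once a machine driver assigns the component a
name_prefix: the DAPM pins are then registered under prefixed names, so
bare snd_soc_dapm_*_pin() lookups would fail. A hypothetical in-file
usage sketch:

    /* hypothetical helper inside nau8824.c */
    static void headset_bias_on(struct nau8824 *nau8824)
    {
            /* resolves to "<prefix> MICBIAS" / "<prefix> SAR" when the
             * component has a name_prefix, bare names otherwise */
            nau8824_dapm_enable_pin(nau8824, "MICBIAS");
            nau8824_dapm_enable_pin(nau8824, "SAR");
    }
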
 static void nau8824_eject_jack(struct nau8824 *nau8824)
 {
        struct snd_soc_dapm_context *dapm = nau8824->dapm;
@@ -839,8 +869,8 @@ static void nau8824_eject_jack(struct nau8824 *nau8824)
        /* Clear all interruption status */
        nau8824_int_status_clear_all(regmap);
 
-       snd_soc_dapm_disable_pin(dapm, "SAR");
-       snd_soc_dapm_disable_pin(dapm, "MICBIAS");
+       nau8824_dapm_disable_pin(nau8824, "SAR");
+       nau8824_dapm_disable_pin(nau8824, "MICBIAS");
        snd_soc_dapm_sync(dapm);
 
        /* Enable the insertion interruption, disable the ejection
@@ -870,8 +900,8 @@ static void nau8824_jdet_work(struct work_struct *work)
        struct regmap *regmap = nau8824->regmap;
        int adc_value, event = 0, event_mask = 0;
 
-       snd_soc_dapm_force_enable_pin(dapm, "MICBIAS");
-       snd_soc_dapm_force_enable_pin(dapm, "SAR");
+       nau8824_dapm_enable_pin(nau8824, "MICBIAS");
+       nau8824_dapm_enable_pin(nau8824, "SAR");
        snd_soc_dapm_sync(dapm);
 
        msleep(100);
@@ -882,8 +912,8 @@ static void nau8824_jdet_work(struct work_struct *work)
        if (adc_value < HEADSET_SARADC_THD) {
                event |= SND_JACK_HEADPHONE;
 
-               snd_soc_dapm_disable_pin(dapm, "SAR");
-               snd_soc_dapm_disable_pin(dapm, "MICBIAS");
+               nau8824_dapm_disable_pin(nau8824, "SAR");
+               nau8824_dapm_disable_pin(nau8824, "MICBIAS");
                snd_soc_dapm_sync(dapm);
        } else {
                event |= SND_JACK_HEADSET;
index 9d5acd2d04abd47281b26d8f81331182c0288fb8..86a7fa31c294b2d3fb00494dd3c934976c2a2044 100644 (file)
@@ -910,13 +910,21 @@ static int rt5682_headset_detect(struct snd_soc_component *component,
                int jack_insert)
 {
        struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
-       struct snd_soc_dapm_context *dapm =
-               snd_soc_component_get_dapm(component);
        unsigned int val, count;
 
        if (jack_insert) {
-               snd_soc_dapm_force_enable_pin(dapm, "CBJ Power");
-               snd_soc_dapm_sync(dapm);
+
+               snd_soc_component_update_bits(component, RT5682_PWR_ANLG_1,
+                       RT5682_PWR_VREF2 | RT5682_PWR_MB,
+                       RT5682_PWR_VREF2 | RT5682_PWR_MB);
+               snd_soc_component_update_bits(component,
+                               RT5682_PWR_ANLG_1, RT5682_PWR_FV2, 0);
+               usleep_range(15000, 20000);
+               snd_soc_component_update_bits(component,
+                               RT5682_PWR_ANLG_1, RT5682_PWR_FV2, RT5682_PWR_FV2);
+               snd_soc_component_update_bits(component, RT5682_PWR_ANLG_3,
+                       RT5682_PWR_CBJ, RT5682_PWR_CBJ);
+
                snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1,
                        RT5682_TRIG_JD_MASK, RT5682_TRIG_JD_HIGH);
 
@@ -944,8 +952,10 @@ static int rt5682_headset_detect(struct snd_soc_component *component,
                rt5682_enable_push_button_irq(component, false);
                snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1,
                        RT5682_TRIG_JD_MASK, RT5682_TRIG_JD_LOW);
-               snd_soc_dapm_disable_pin(dapm, "CBJ Power");
-               snd_soc_dapm_sync(dapm);
+               snd_soc_component_update_bits(component, RT5682_PWR_ANLG_1,
+                       RT5682_PWR_VREF2 | RT5682_PWR_MB, 0);
+               snd_soc_component_update_bits(component, RT5682_PWR_ANLG_3,
+                       RT5682_PWR_CBJ, 0);
 
                rt5682->jack_type = 0;
        }
@@ -1198,7 +1208,7 @@ static int set_filter_clk(struct snd_soc_dapm_widget *w,
        struct snd_soc_component *component =
                snd_soc_dapm_to_component(w->dapm);
        struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
-       int ref, val, reg, sft, mask, idx = -EINVAL;
+       int ref, val, reg, idx = -EINVAL;
        static const int div_f[] = {1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48};
        static const int div_o[] = {1, 2, 4, 6, 8, 12, 16, 24, 32, 48};
 
@@ -1212,15 +1222,10 @@ static int set_filter_clk(struct snd_soc_dapm_widget *w,
 
        idx = rt5682_div_sel(rt5682, ref, div_f, ARRAY_SIZE(div_f));
 
-       if (w->shift == RT5682_PWR_ADC_S1F_BIT) {
+       if (w->shift == RT5682_PWR_ADC_S1F_BIT)
                reg = RT5682_PLL_TRACK_3;
-               sft = RT5682_ADC_OSR_SFT;
-               mask = RT5682_ADC_OSR_MASK;
-       } else {
+       else
                reg = RT5682_PLL_TRACK_2;
-               sft = RT5682_DAC_OSR_SFT;
-               mask = RT5682_DAC_OSR_MASK;
-       }
 
        snd_soc_component_update_bits(component, reg,
                RT5682_FILTER_CLK_DIV_MASK, idx << RT5682_FILTER_CLK_DIV_SFT);
@@ -1232,7 +1237,8 @@ static int set_filter_clk(struct snd_soc_dapm_widget *w,
        }
 
        snd_soc_component_update_bits(component, RT5682_ADDA_CLK_1,
-               mask, idx << sft);
+               RT5682_ADC_OSR_MASK | RT5682_DAC_OSR_MASK,
+               (idx << RT5682_ADC_OSR_SFT) | (idx << RT5682_DAC_OSR_SFT));
 
        return 0;
 }
@@ -1591,8 +1597,6 @@ static const struct snd_soc_dapm_widget rt5682_dapm_widgets[] = {
                0, NULL, 0),
        SND_SOC_DAPM_SUPPLY("Vref1", RT5682_PWR_ANLG_1, RT5682_PWR_VREF1_BIT, 0,
                rt5655_set_verf, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
-       SND_SOC_DAPM_SUPPLY("Vref2", RT5682_PWR_ANLG_1, RT5682_PWR_VREF2_BIT, 0,
-               rt5655_set_verf, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
 
        /* ASRC */
        SND_SOC_DAPM_SUPPLY_S("DAC STO1 ASRC", 1, RT5682_PLL_TRACK_1,
@@ -1627,9 +1631,6 @@ static const struct snd_soc_dapm_widget rt5682_dapm_widgets[] = {
        SND_SOC_DAPM_PGA("BST1 CBJ", SND_SOC_NOPM,
                0, 0, NULL, 0),
 
-       SND_SOC_DAPM_SUPPLY("CBJ Power", RT5682_PWR_ANLG_3,
-               RT5682_PWR_CBJ_BIT, 0, NULL, 0),
-
        /* REC Mixer */
        SND_SOC_DAPM_MIXER("RECMIX1L", SND_SOC_NOPM, 0, 0, rt5682_rec1_l_mix,
                ARRAY_SIZE(rt5682_rec1_l_mix)),
@@ -1792,17 +1793,13 @@ static const struct snd_soc_dapm_route rt5682_dapm_routes[] = {
 
        /*Vref*/
        {"MICBIAS1", NULL, "Vref1"},
-       {"MICBIAS1", NULL, "Vref2"},
        {"MICBIAS2", NULL, "Vref1"},
-       {"MICBIAS2", NULL, "Vref2"},
 
        {"CLKDET SYS", NULL, "CLKDET"},
 
        {"IN1P", NULL, "LDO2"},
 
        {"BST1 CBJ", NULL, "IN1P"},
-       {"BST1 CBJ", NULL, "CBJ Power"},
-       {"CBJ Power", NULL, "Vref2"},
 
        {"RECMIX1L", "CBJ Switch", "BST1 CBJ"},
        {"RECMIX1L", NULL, "RECMIX1L Power"},
@@ -1912,9 +1909,7 @@ static const struct snd_soc_dapm_route rt5682_dapm_routes[] = {
        {"HP Amp", NULL, "Capless"},
        {"HP Amp", NULL, "Charge Pump"},
        {"HP Amp", NULL, "CLKDET SYS"},
-       {"HP Amp", NULL, "CBJ Power"},
        {"HP Amp", NULL, "Vref1"},
-       {"HP Amp", NULL, "Vref2"},
        {"HPOL Playback", "Switch", "HP Amp"},
        {"HPOR Playback", "Switch", "HP Amp"},
        {"HPOL", NULL, "HPOL Playback"},
@@ -2303,16 +2298,13 @@ static int rt5682_set_bias_level(struct snd_soc_component *component,
        switch (level) {
        case SND_SOC_BIAS_PREPARE:
                regmap_update_bits(rt5682->regmap, RT5682_PWR_ANLG_1,
-                       RT5682_PWR_MB | RT5682_PWR_BG,
-                       RT5682_PWR_MB | RT5682_PWR_BG);
+                       RT5682_PWR_BG, RT5682_PWR_BG);
                regmap_update_bits(rt5682->regmap, RT5682_PWR_DIG_1,
                        RT5682_DIG_GATE_CTRL | RT5682_PWR_LDO,
                        RT5682_DIG_GATE_CTRL | RT5682_PWR_LDO);
                break;
 
        case SND_SOC_BIAS_STANDBY:
-               regmap_update_bits(rt5682->regmap, RT5682_PWR_ANLG_1,
-                       RT5682_PWR_MB, RT5682_PWR_MB);
                regmap_update_bits(rt5682->regmap, RT5682_PWR_DIG_1,
                        RT5682_DIG_GATE_CTRL, RT5682_DIG_GATE_CTRL);
                break;
@@ -2320,7 +2312,7 @@ static int rt5682_set_bias_level(struct snd_soc_component *component,
                regmap_update_bits(rt5682->regmap, RT5682_PWR_DIG_1,
                        RT5682_DIG_GATE_CTRL | RT5682_PWR_LDO, 0);
                regmap_update_bits(rt5682->regmap, RT5682_PWR_ANLG_1,
-                       RT5682_PWR_MB | RT5682_PWR_BG, 0);
+                       RT5682_PWR_BG, 0);
                break;
 
        default:
@@ -2363,6 +2355,8 @@ static int rt5682_resume(struct snd_soc_component *component)
        regcache_cache_only(rt5682->regmap, false);
        regcache_sync(rt5682->regmap);
 
+       rt5682_irq(0, rt5682);
+
        return 0;
 }
 #else
index 385fa2e9525abe2c89fc624baef239b2541b7328..22c3a6bc0b6c47ae90de8fc435127b089fb80517 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Copyright 2011 NW Digital Radio
  *
- * Author: Jeremy McDermond <nh6z@nh6z.net>
+ * Author: Annaliese McDermond <nh6z@nh6z.net>
  *
  * Based on sound/soc/codecs/wm8974 and TI driver for kernel 2.6.27.
  *
@@ -72,5 +72,5 @@ static struct i2c_driver aic32x4_i2c_driver = {
 module_i2c_driver(aic32x4_i2c_driver);
 
 MODULE_DESCRIPTION("ASoC TLV320AIC32x4 codec driver I2C");
-MODULE_AUTHOR("Jeremy McDermond <nh6z@nh6z.net>");
+MODULE_AUTHOR("Annaliese McDermond <nh6z@nh6z.net>");
 MODULE_LICENSE("GPL");
index 07d78ae51e05c77bbd0a93410d0df2449be5d759..aa5b7ba0254bc6b7e009ce2adb6099d2d138819c 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Copyright 2011 NW Digital Radio
  *
- * Author: Jeremy McDermond <nh6z@nh6z.net>
+ * Author: Annaliese McDermond <nh6z@nh6z.net>
  *
  * Based on sound/soc/codecs/wm8974 and TI driver for kernel 2.6.27.
  *
@@ -74,5 +74,5 @@ static struct spi_driver aic32x4_spi_driver = {
 module_spi_driver(aic32x4_spi_driver);
 
 MODULE_DESCRIPTION("ASoC TLV320AIC32x4 codec driver SPI");
-MODULE_AUTHOR("Jeremy McDermond <nh6z@nh6z.net>");
+MODULE_AUTHOR("Annaliese McDermond <nh6z@nh6z.net>");
 MODULE_LICENSE("GPL");
index 96f1526cb258a4e718afb86712aece3608c18003..5520044929f42ff6c51d18fb8338602127c79b91 100644 (file)
@@ -490,6 +490,8 @@ static const struct snd_soc_dapm_widget aic32x4_dapm_widgets[] = {
        SND_SOC_DAPM_INPUT("IN2_R"),
        SND_SOC_DAPM_INPUT("IN3_L"),
        SND_SOC_DAPM_INPUT("IN3_R"),
+       SND_SOC_DAPM_INPUT("CM_L"),
+       SND_SOC_DAPM_INPUT("CM_R"),
 };
 
 static const struct snd_soc_dapm_route aic32x4_dapm_routes[] = {
index 283583d1db60555f0831229be3b2f0e1a65bafee..516d17cb2182287f8f739a072fe3128687cecd93 100644 (file)
@@ -1609,7 +1609,6 @@ static int aic3x_probe(struct snd_soc_component *component)
        struct aic3x_priv *aic3x = snd_soc_component_get_drvdata(component);
        int ret, i;
 
-       INIT_LIST_HEAD(&aic3x->list);
        aic3x->component = component;
 
        for (i = 0; i < ARRAY_SIZE(aic3x->supplies); i++) {
@@ -1873,6 +1872,7 @@ static int aic3x_i2c_probe(struct i2c_client *i2c,
        if (ret != 0)
                goto err_gpio;
 
+       INIT_LIST_HEAD(&aic3x->list);
        list_add(&aic3x->list, &reset_list);
 
        return 0;
@@ -1889,6 +1889,8 @@ static int aic3x_i2c_remove(struct i2c_client *client)
 {
        struct aic3x_priv *aic3x = i2c_get_clientdata(client);
 
+       list_del(&aic3x->list);
+
        if (gpio_is_valid(aic3x->gpio_reset) &&
            !aic3x_is_shared_reset(aic3x)) {
                gpio_set_value(aic3x->gpio_reset, 0);
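
The aic3x hunks pair list_add() in I2C probe with list_del() in remove
(moving INIT_LIST_HEAD alongside), so an unbound device no longer leaves
a stale node on reset_list. The lifecycle pattern, sketched:

    #include <linux/list.h>

    struct example_priv {
            struct list_head list;
    };

    static LIST_HEAD(reset_list);

    static void example_bind(struct example_priv *priv)
    {
            INIT_LIST_HEAD(&priv->list);
            list_add(&priv->list, &reset_list);
    }

    static void example_unbind(struct example_priv *priv)
    {
            list_del(&priv->list);  /* must precede freeing priv */
    }
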
index b93fdc8d2d6fb76112eabc4fe2a6a302fa86b710..b0b48eb9c7c91578cb5d588955a0afa74f0ff262 100644 (file)
@@ -2905,6 +2905,8 @@ int wm_adsp2_event(struct snd_soc_dapm_widget *w,
                if (wm_adsp_fw[dsp->fw].num_caps != 0)
                        wm_adsp_buffer_free(dsp);
 
+               dsp->fatal_error = false;
+
                mutex_unlock(&dsp->pwr_lock);
 
                adsp_dbg(dsp, "Execution stopped\n");
@@ -3000,6 +3002,9 @@ static int wm_adsp_compr_attach(struct wm_adsp_compr *compr)
 {
        struct wm_adsp_compr_buf *buf = NULL, *tmp;
 
+       if (compr->dsp->fatal_error)
+               return -EINVAL;
+
        list_for_each_entry(tmp, &compr->dsp->buffer_list, list) {
                if (!tmp->name || !strcmp(compr->name, tmp->name)) {
                        buf = tmp;
@@ -3535,11 +3540,11 @@ static int wm_adsp_buffer_get_error(struct wm_adsp_compr_buf *buf)
 
        ret = wm_adsp_buffer_read(buf, HOST_BUFFER_FIELD(error), &buf->error);
        if (ret < 0) {
-               adsp_err(buf->dsp, "Failed to check buffer error: %d\n", ret);
+               compr_err(buf, "Failed to check buffer error: %d\n", ret);
                return ret;
        }
        if (buf->error != 0) {
-               adsp_err(buf->dsp, "Buffer error occurred: %d\n", buf->error);
+               compr_err(buf, "Buffer error occurred: %d\n", buf->error);
                return -EIO;
        }
 
@@ -3571,8 +3576,6 @@ int wm_adsp_compr_trigger(struct snd_compr_stream *stream, int cmd)
                if (ret < 0)
                        break;
 
-               wm_adsp_buffer_clear(compr->buf);
-
                /* Trigger the IRQ at one fragment of data */
                ret = wm_adsp_buffer_write(compr->buf,
                                           HOST_BUFFER_FIELD(high_water_mark),
@@ -3584,6 +3587,8 @@ int wm_adsp_compr_trigger(struct snd_compr_stream *stream, int cmd)
                }
                break;
        case SNDRV_PCM_TRIGGER_STOP:
+               if (wm_adsp_compr_attached(compr))
+                       wm_adsp_buffer_clear(compr->buf);
                break;
        default:
                ret = -EINVAL;
@@ -3917,22 +3922,40 @@ int wm_adsp2_lock(struct wm_adsp *dsp, unsigned int lock_regions)
 }
 EXPORT_SYMBOL_GPL(wm_adsp2_lock);
 
+static void wm_adsp_fatal_error(struct wm_adsp *dsp)
+{
+       struct wm_adsp_compr *compr;
+
+       dsp->fatal_error = true;
+
+       list_for_each_entry(compr, &dsp->compr_list, list) {
+               if (compr->stream) {
+                       snd_compr_stop_error(compr->stream,
+                                            SNDRV_PCM_STATE_XRUN);
+                       snd_compr_fragment_elapsed(compr->stream);
+               }
+       }
+}
+
 irqreturn_t wm_adsp2_bus_error(struct wm_adsp *dsp)
 {
        unsigned int val;
        struct regmap *regmap = dsp->regmap;
        int ret = 0;
 
+       mutex_lock(&dsp->pwr_lock);
+
        ret = regmap_read(regmap, dsp->base + ADSP2_LOCK_REGION_CTRL, &val);
        if (ret) {
                adsp_err(dsp,
                        "Failed to read Region Lock Ctrl register: %d\n", ret);
-               return IRQ_HANDLED;
+               goto error;
        }
 
        if (val & ADSP2_WDT_TIMEOUT_STS_MASK) {
                adsp_err(dsp, "watchdog timeout error\n");
                wm_adsp_stop_watchdog(dsp);
+               wm_adsp_fatal_error(dsp);
        }
 
        if (val & (ADSP2_SLAVE_ERR_MASK | ADSP2_REGION_LOCK_ERR_MASK)) {
@@ -3946,7 +3969,7 @@ irqreturn_t wm_adsp2_bus_error(struct wm_adsp *dsp)
                        adsp_err(dsp,
                                 "Failed to read Bus Err Addr register: %d\n",
                                 ret);
-                       return IRQ_HANDLED;
+                       goto error;
                }
 
                adsp_err(dsp, "bus error address = 0x%x\n",
@@ -3959,7 +3982,7 @@ irqreturn_t wm_adsp2_bus_error(struct wm_adsp *dsp)
                        adsp_err(dsp,
                                 "Failed to read Pmem Xmem Err Addr register: %d\n",
                                 ret);
-                       return IRQ_HANDLED;
+                       goto error;
                }
 
                adsp_err(dsp, "xmem error address = 0x%x\n",
@@ -3972,6 +3995,9 @@ irqreturn_t wm_adsp2_bus_error(struct wm_adsp *dsp)
        regmap_update_bits(regmap, dsp->base + ADSP2_LOCK_REGION_CTRL,
                           ADSP2_CTRL_ERR_EINT, ADSP2_CTRL_ERR_EINT);
 
+error:
+       mutex_unlock(&dsp->pwr_lock);
+
        return IRQ_HANDLED;
 }
 EXPORT_SYMBOL_GPL(wm_adsp2_bus_error);
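
A sketch of how a fatal DSP error can be surfaced to user space on a compressed stream, as wm_adsp_fatal_error() does above; only the two snd_compr_* calls are taken from the hunk, the wrapper name is hypothetical:

	#include <sound/compress_driver.h>

	static void report_fatal_error(struct snd_compr_stream *stream)
	{
		/* put the stream into an error state user space sees as XRUN ... */
		snd_compr_stop_error(stream, SNDRV_PCM_STATE_XRUN);
		/* ... and wake poll()/read() waiters so they notice promptly */
		snd_compr_fragment_elapsed(stream);
	}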
index 59e07ad163296c3ff21f238ccdfca96b6af43c76..8f09b4419a914ae773558c6175529b1acebf9a82 100644 (file)
@@ -85,6 +85,7 @@ struct wm_adsp {
        bool preloaded;
        bool booted;
        bool running;
+       bool fatal_error;
 
        struct list_head ctl_list;
 
index 528e8b108422971eea52655b55642ac5cdbe575d..0b937924d2e47961d697d068edf3944772058baa 100644 (file)
@@ -445,6 +445,19 @@ struct dma_chan *fsl_asrc_get_dma_channel(struct fsl_asrc_pair *pair, bool dir)
 }
 EXPORT_SYMBOL_GPL(fsl_asrc_get_dma_channel);
 
+static int fsl_asrc_dai_startup(struct snd_pcm_substream *substream,
+                               struct snd_soc_dai *dai)
+{
+       struct fsl_asrc *asrc_priv = snd_soc_dai_get_drvdata(dai);
+
+       /* Odd channel numbers are not valid for older ASRC (channel_bits == 3) */
+       if (asrc_priv->channel_bits == 3)
+               snd_pcm_hw_constraint_step(substream->runtime, 0,
+                                          SNDRV_PCM_HW_PARAM_CHANNELS, 2);
+
+       return 0;
+}
+
 static int fsl_asrc_dai_hw_params(struct snd_pcm_substream *substream,
                                  struct snd_pcm_hw_params *params,
                                  struct snd_soc_dai *dai)
@@ -539,6 +552,7 @@ static int fsl_asrc_dai_trigger(struct snd_pcm_substream *substream, int cmd,
 }
 
 static const struct snd_soc_dai_ops fsl_asrc_dai_ops = {
+       .startup      = fsl_asrc_dai_startup,
        .hw_params    = fsl_asrc_dai_hw_params,
        .hw_free      = fsl_asrc_dai_hw_free,
        .trigger      = fsl_asrc_dai_trigger,
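
The new startup callback rejects odd channel counts up front via a hardware constraint rather than failing later in hw_params. A minimal sketch of the same constraint, assuming only the standard ALSA helper shown in the hunk:

	#include <sound/pcm.h>

	static int even_channels_startup(struct snd_pcm_substream *substream)
	{
		/* step of 2 from a base of 0: only 2, 4, 6, ... channels survive */
		return snd_pcm_hw_constraint_step(substream->runtime, 0,
						  SNDRV_PCM_HW_PARAM_CHANNELS, 2);
	}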
index afe67c865330e39c7b3d1b30bd6127dd764f42aa..3623aa9a6f2ea7838e2c855a5d88681436ac11c1 100644 (file)
@@ -54,6 +54,8 @@ struct fsl_esai {
        u32 fifo_depth;
        u32 slot_width;
        u32 slots;
+       u32 tx_mask;
+       u32 rx_mask;
        u32 hck_rate[2];
        u32 sck_rate[2];
        bool hck_dir[2];
@@ -361,21 +363,13 @@ static int fsl_esai_set_dai_tdm_slot(struct snd_soc_dai *dai, u32 tx_mask,
        regmap_update_bits(esai_priv->regmap, REG_ESAI_TCCR,
                           ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(slots));
 
-       regmap_update_bits(esai_priv->regmap, REG_ESAI_TSMA,
-                          ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(tx_mask));
-       regmap_update_bits(esai_priv->regmap, REG_ESAI_TSMB,
-                          ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(tx_mask));
-
        regmap_update_bits(esai_priv->regmap, REG_ESAI_RCCR,
                           ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(slots));
 
-       regmap_update_bits(esai_priv->regmap, REG_ESAI_RSMA,
-                          ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(rx_mask));
-       regmap_update_bits(esai_priv->regmap, REG_ESAI_RSMB,
-                          ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(rx_mask));
-
        esai_priv->slot_width = slot_width;
        esai_priv->slots = slots;
+       esai_priv->tx_mask = tx_mask;
+       esai_priv->rx_mask = rx_mask;
 
        return 0;
 }
@@ -596,6 +590,7 @@ static int fsl_esai_trigger(struct snd_pcm_substream *substream, int cmd,
        bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
        u8 i, channels = substream->runtime->channels;
        u32 pins = DIV_ROUND_UP(channels, esai_priv->slots);
+       u32 mask;
 
        switch (cmd) {
        case SNDRV_PCM_TRIGGER_START:
@@ -608,15 +603,38 @@ static int fsl_esai_trigger(struct snd_pcm_substream *substream, int cmd,
                for (i = 0; tx && i < channels; i++)
                        regmap_write(esai_priv->regmap, REG_ESAI_ETDR, 0x0);
 
+               /*
+                * If TE/RE are set at the end of the enablement flow,
+                * a channel swap issue shows up in the multi data line
+                * case. To work around it, switch the bit enablement
+                * sequence to:
+                * 1) clear xSMB & xSMA (done at probe time and in the
+                *    stop state)
+                * 2) set TE/RE
+                * 3) set xSMB
+                * 4) set xSMA: xSMA comes last because setting it
+                *    triggers the ESAI to start.
+                */
                regmap_update_bits(esai_priv->regmap, REG_ESAI_xCR(tx),
                                   tx ? ESAI_xCR_TE_MASK : ESAI_xCR_RE_MASK,
                                   tx ? ESAI_xCR_TE(pins) : ESAI_xCR_RE(pins));
+               mask = tx ? esai_priv->tx_mask : esai_priv->rx_mask;
+
+               regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMB(tx),
+                                  ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(mask));
+               regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMA(tx),
+                                  ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(mask));
+
                break;
        case SNDRV_PCM_TRIGGER_SUSPEND:
        case SNDRV_PCM_TRIGGER_STOP:
        case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
                regmap_update_bits(esai_priv->regmap, REG_ESAI_xCR(tx),
                                   tx ? ESAI_xCR_TE_MASK : ESAI_xCR_RE_MASK, 0);
+               regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMA(tx),
+                                  ESAI_xSMA_xS_MASK, 0);
+               regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMB(tx),
+                                  ESAI_xSMB_xS_MASK, 0);
 
                /* Disable and reset FIFO */
                regmap_update_bits(esai_priv->regmap, REG_ESAI_xFCR(tx),
@@ -906,6 +924,15 @@ static int fsl_esai_probe(struct platform_device *pdev)
                return ret;
        }
 
+       esai_priv->tx_mask = 0xFFFFFFFF;
+       esai_priv->rx_mask = 0xFFFFFFFF;
+
+       /* Clear the TSMA, TSMB, RSMA, RSMB */
+       regmap_write(esai_priv->regmap, REG_ESAI_TSMA, 0);
+       regmap_write(esai_priv->regmap, REG_ESAI_TSMB, 0);
+       regmap_write(esai_priv->regmap, REG_ESAI_RSMA, 0);
+       regmap_write(esai_priv->regmap, REG_ESAI_RSMB, 0);
+
        ret = devm_snd_soc_register_component(&pdev->dev, &fsl_esai_component,
                                              &fsl_esai_dai, 1);
        if (ret) {
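
The net effect of this diff is that set_dai_tdm_slot() no longer touches the slot-mask registers at all: it only caches the masks, and trigger() programs them in the TE/RE -> xSMB -> xSMA order the comment describes. A sketch of the deferred-mask half, with a hypothetical state struct:

	#include <linux/types.h>

	struct esai_state {
		u32 tx_mask;
		u32 rx_mask;
	};

	static int esai_cache_tdm_masks(struct esai_state *s, u32 tx_mask, u32 rx_mask)
	{
		s->tx_mask = tx_mask;	/* applied later, in the trigger path */
		s->rx_mask = rx_mask;
		return 0;
	}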
index bb12351330e8c0f307b0237ee2e2f1b6b2f9aeec..69bc4848d7876cec544d4ab5067230092953b806 100644 (file)
@@ -20,6 +20,8 @@
 #include <linux/string.h>
 #include <sound/simple_card_utils.h>
 
+#define DPCM_SELECTABLE 1
+
 struct graph_priv {
        struct snd_soc_card snd_card;
        struct graph_dai_props {
@@ -440,6 +442,7 @@ static int graph_for_each_link(struct graph_priv *priv,
        struct device_node *codec_port;
        struct device_node *codec_port_old = NULL;
        struct asoc_simple_card_data adata;
+       uintptr_t dpcm_selectable = (uintptr_t)of_device_get_match_data(dev);
        int rc, ret;
 
        /* loop for all listed CPU port */
@@ -470,8 +473,9 @@ static int graph_for_each_link(struct graph_priv *priv,
                         * if Codec port has many endpoints,
                         * or has convert-xxx property
                         */
-                       if ((of_get_child_count(codec_port) > 1) ||
-                           adata.convert_rate || adata.convert_channels)
+                       if (dpcm_selectable &&
+                           ((of_get_child_count(codec_port) > 1) ||
+                            adata.convert_rate || adata.convert_channels))
                                ret = func_dpcm(priv, cpu_ep, codec_ep, li,
                                                (codec_port_old == codec_port));
                        /* else normal sound */
@@ -732,7 +736,8 @@ static int graph_remove(struct platform_device *pdev)
 
 static const struct of_device_id graph_of_match[] = {
        { .compatible = "audio-graph-card", },
-       { .compatible = "audio-graph-scu-card", },
+       { .compatible = "audio-graph-scu-card",
+         .data = (void *)DPCM_SELECTABLE },
        {},
 };
 MODULE_DEVICE_TABLE(of, graph_of_match);
index 7147bba45a2a61b0830ed49e6057edb3935c9c03..34de32efc4c4defd14c823b904931f50886b9c69 100644 (file)
@@ -9,12 +9,15 @@
 #include <linux/device.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/string.h>
 #include <sound/simple_card.h>
 #include <sound/soc-dai.h>
 #include <sound/soc.h>
 
+#define DPCM_SELECTABLE 1
+
 struct simple_priv {
        struct snd_soc_card snd_card;
        struct simple_dai_props {
@@ -441,6 +444,7 @@ static int simple_for_each_link(struct simple_priv *priv,
        struct device *dev = simple_priv_to_dev(priv);
        struct device_node *top = dev->of_node;
        struct device_node *node;
+       uintptr_t dpcm_selectable = (uintptr_t)of_device_get_match_data(dev);
        bool is_top = 0;
        int ret = 0;
 
@@ -480,8 +484,9 @@ static int simple_for_each_link(struct simple_priv *priv,
                         * if it has many CPUs,
                         * or has convert-xxx property
                         */
-                       if (num > 2 ||
-                           adata.convert_rate || adata.convert_channels)
+                       if (dpcm_selectable &&
+                           (num > 2 ||
+                            adata.convert_rate || adata.convert_channels))
                                ret = func_dpcm(priv, np, codec, li, is_top);
                        /* else normal sound */
                        else
@@ -822,7 +827,8 @@ static int simple_remove(struct platform_device *pdev)
 
 static const struct of_device_id simple_of_match[] = {
        { .compatible = "simple-audio-card", },
-       { .compatible = "simple-scu-audio-card", },
+       { .compatible = "simple-scu-audio-card",
+         .data = (void *)DPCM_SELECTABLE },
        {},
 };
 MODULE_DEVICE_TABLE(of, simple_of_match);
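
Both card drivers now gate the DPCM path on OF match data rather than on the compatible string itself. A sketch of the lookup, reusing the DPCM_SELECTABLE flag defined above:

	#include <linux/of_device.h>

	static bool card_wants_dpcm(struct device *dev)
	{
		/* NULL for the plain card, DPCM_SELECTABLE for the -scu- variant */
		return (uintptr_t)of_device_get_match_data(dev) == DPCM_SELECTABLE;
	}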
index 08cea5b5cda9fa9f6f617213c1fba5beef4bd489..0e8b1c5eec888b4c988206a04a3a8e1ca65e14aa 100644 (file)
@@ -706,9 +706,17 @@ static int sst_soc_probe(struct snd_soc_component *component)
        return sst_dsp_init_v2_dpcm(component);
 }
 
+static void sst_soc_remove(struct snd_soc_component *component)
+{
+       struct sst_data *drv = dev_get_drvdata(component->dev);
+
+       drv->soc_card = NULL;
+}
+
 static const struct snd_soc_component_driver sst_soc_platform_drv  = {
        .name           = DRV_NAME,
        .probe          = sst_soc_probe,
+       .remove         = sst_soc_remove,
        .ops            = &sst_platform_ops,
        .compr_ops      = &sst_platform_compr_ops,
        .pcm_new        = sst_pcm_new,
index 3263b0495853c2d57e22cb1a90d6cc7515deb952..c0e0844f75b9fe891fc7352c25e957c800f5e4b6 100644 (file)
@@ -43,6 +43,7 @@ struct cht_mc_private {
        struct clk *mclk;
        struct snd_soc_jack jack;
        bool ts3a227e_present;
+       int quirks;
 };
 
 static int platform_clock_control(struct snd_soc_dapm_widget *w,
@@ -54,6 +55,10 @@ static int platform_clock_control(struct snd_soc_dapm_widget *w,
        struct cht_mc_private *ctx = snd_soc_card_get_drvdata(card);
        int ret;
 
+       /* See the comment in snd_cht_mc_probe() */
+       if (ctx->quirks & QUIRK_PMC_PLT_CLK_0)
+               return 0;
+
        codec_dai = snd_soc_card_get_codec_dai(card, CHT_CODEC_DAI);
        if (!codec_dai) {
                dev_err(card->dev, "Codec dai not found; Unable to set platform clock\n");
@@ -223,6 +228,10 @@ static int cht_codec_init(struct snd_soc_pcm_runtime *runtime)
                        "jack detection gpios not added, error %d\n", ret);
        }
 
+       /* See the comment in snd_cht_mc_probe() */
+       if (ctx->quirks & QUIRK_PMC_PLT_CLK_0)
+               return 0;
+
        /*
         * The firmware might enable the clock at
         * boot (this information may or may not
@@ -423,16 +432,15 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
        const char *mclk_name;
        struct snd_soc_acpi_mach *mach;
        const char *platform_name;
-       int quirks = 0;
-
-       dmi_id = dmi_first_match(cht_max98090_quirk_table);
-       if (dmi_id)
-               quirks = (unsigned long)dmi_id->driver_data;
 
        drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
        if (!drv)
                return -ENOMEM;
 
+       dmi_id = dmi_first_match(cht_max98090_quirk_table);
+       if (dmi_id)
+               drv->quirks = (unsigned long)dmi_id->driver_data;
+
        drv->ts3a227e_present = acpi_dev_found("104C227E");
        if (!drv->ts3a227e_present) {
                /* no need to probe the TI jack detection chip */
@@ -458,7 +466,7 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
        snd_soc_card_cht.dev = &pdev->dev;
        snd_soc_card_set_drvdata(&snd_soc_card_cht, drv);
 
-       if (quirks & QUIRK_PMC_PLT_CLK_0)
+       if (drv->quirks & QUIRK_PMC_PLT_CLK_0)
                mclk_name = "pmc_plt_clk_0";
        else
                mclk_name = "pmc_plt_clk_3";
@@ -471,6 +479,21 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
                return PTR_ERR(drv->mclk);
        }
 
+       /*
+        * Boards which have the MAX98090's clk connected to clk_0 do not seem
+        * to like it if we muck with the clock. If we disable the clock when
+        * it is unused we get "max98090 i2c-193C9890:00: PLL unlocked" errors
+        * and the PLL never seems to lock again.
+        * So for these boards we enable it here once and leave it at that.
+        */
+       if (drv->quirks & QUIRK_PMC_PLT_CLK_0) {
+               ret_val = clk_prepare_enable(drv->mclk);
+               if (ret_val < 0) {
+                       dev_err(&pdev->dev, "MCLK enable error: %d\n", ret_val);
+                       return ret_val;
+               }
+       }
+
        ret_val = devm_snd_soc_register_card(&pdev->dev, &snd_soc_card_cht);
        if (ret_val) {
                dev_err(&pdev->dev,
@@ -481,11 +504,23 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
        return ret_val;
 }
 
+static int snd_cht_mc_remove(struct platform_device *pdev)
+{
+       struct snd_soc_card *card = platform_get_drvdata(pdev);
+       struct cht_mc_private *ctx = snd_soc_card_get_drvdata(card);
+
+       if (ctx->quirks & QUIRK_PMC_PLT_CLK_0)
+               clk_disable_unprepare(ctx->mclk);
+
+       return 0;
+}
+
 static struct platform_driver snd_cht_mc_driver = {
        .driver = {
                .name = "cht-bsw-max98090",
        },
        .probe = snd_cht_mc_probe,
+       .remove = snd_cht_mc_remove,
 };
 
 module_platform_driver(snd_cht_mc_driver)
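
A sketch of the quirk handling introduced above, assuming the ctx fields shown in the hunks: on QUIRK_PMC_PLT_CLK_0 boards the MCLK is enabled once at probe and only released at remove, so the codec PLL is never disturbed at runtime:

	static int cht_quirk_clk_enable(struct cht_mc_private *ctx, struct device *dev)
	{
		int ret;

		if (!(ctx->quirks & QUIRK_PMC_PLT_CLK_0))
			return 0;	/* other boards keep gating via DAPM */

		ret = clk_prepare_enable(ctx->mclk);	/* held until remove() */
		if (ret < 0)
			dev_err(dev, "MCLK enable error: %d\n", ret);
		return ret;
	}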
index 7044d8c2b187375cd6fd44b3342eedf4b37fd8d1..879f14257a3ea4c8fae5eaa683c365e519ecfc17 100644 (file)
@@ -405,7 +405,7 @@ static const struct snd_pcm_hw_constraint_list constraints_dmic_channels = {
 };
 
 static const unsigned int dmic_2ch[] = {
-       4,
+       2,
 };
 
 static const struct snd_pcm_hw_constraint_list constraints_dmic_2ch = {
index 28c4806b196a2fc3505ce88e298b834cf64e1c56..4bf70b4429f03075b07d877c67f6003d15d5336d 100644 (file)
@@ -483,6 +483,7 @@ static void skl_set_base_module_format(struct skl_sst *ctx,
        base_cfg->audio_fmt.bit_depth = format->bit_depth;
        base_cfg->audio_fmt.valid_bit_depth = format->valid_bit_depth;
        base_cfg->audio_fmt.ch_cfg = format->ch_cfg;
+       base_cfg->audio_fmt.sample_type = format->sample_type;
 
        dev_dbg(ctx->dev, "bit_depth=%x valid_bd=%x ch_config=%x\n",
                        format->bit_depth, format->valid_bit_depth,
index 1ae83f4ccc3615bfa42e28d08d43f27870526ded..9735e24122514f81d9ee88002fdbdd32ade65e4e 100644 (file)
@@ -181,6 +181,7 @@ int skl_pcm_link_dma_prepare(struct device *dev, struct skl_pipe_params *params)
        struct hdac_stream *hstream;
        struct hdac_ext_stream *stream;
        struct hdac_ext_link *link;
+       unsigned char stream_tag;
 
        hstream = snd_hdac_get_stream(bus, params->stream,
                                        params->link_dma_id + 1);
@@ -199,10 +200,13 @@ int skl_pcm_link_dma_prepare(struct device *dev, struct skl_pipe_params *params)
 
        snd_hdac_ext_link_stream_setup(stream, format_val);
 
-       list_for_each_entry(link, &bus->hlink_list, list) {
-               if (link->index == params->link_index)
-                       snd_hdac_ext_link_set_stream_id(link,
-                                       hstream->stream_tag);
+       stream_tag = hstream->stream_tag;
+       if (stream->hstream.direction == SNDRV_PCM_STREAM_PLAYBACK) {
+               list_for_each_entry(link, &bus->hlink_list, list) {
+                       if (link->index == params->link_index)
+                               snd_hdac_ext_link_set_stream_id(link,
+                                                               stream_tag);
+               }
        }
 
        stream->link_prepared = 1;
@@ -645,6 +649,7 @@ static int skl_link_hw_free(struct snd_pcm_substream *substream,
        struct hdac_ext_stream *link_dev =
                                snd_soc_dai_get_dma_data(dai, substream);
        struct hdac_ext_link *link;
+       unsigned char stream_tag;
 
        dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
 
@@ -654,7 +659,11 @@ static int skl_link_hw_free(struct snd_pcm_substream *substream,
        if (!link)
                return -EINVAL;
 
-       snd_hdac_ext_link_clear_stream_id(link, hdac_stream(link_dev)->stream_tag);
+       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+               stream_tag = hdac_stream(link_dev)->stream_tag;
+               snd_hdac_ext_link_clear_stream_id(link, stream_tag);
+       }
+
        snd_hdac_ext_stream_release(link_dev, HDAC_EXT_STREAM_TYPE_LINK);
        return 0;
 }
@@ -1453,13 +1462,20 @@ static int skl_platform_soc_probe(struct snd_soc_component *component)
        return 0;
 }
 
+static void skl_pcm_remove(struct snd_soc_component *component)
+{
+       /* remove topology */
+       snd_soc_tplg_component_remove(component, SND_SOC_TPLG_INDEX_ALL);
+}
+
 static const struct snd_soc_component_driver skl_component  = {
        .name           = "pcm",
        .probe          = skl_platform_soc_probe,
+       .remove         = skl_pcm_remove,
        .ops            = &skl_platform_ops,
        .pcm_new        = skl_pcm_new,
        .pcm_free       = skl_pcm_free,
-       .ignore_module_refcount = 1, /* do not increase the refcount in core */
+       .module_get_upon_open = 1, /* increment refcount when a pcm is opened */
 };
 
 int skl_platform_register(struct device *dev)
index 1b8bcdaf02d116cbc124aac6597f3a8a501b468d..9a163d7064d174ff1e4142cb09b31adeaa9c6f59 100644 (file)
@@ -49,6 +49,7 @@ enum bt_sco_state {
        BT_SCO_STATE_IDLE,
        BT_SCO_STATE_RUNNING,
        BT_SCO_STATE_ENDING,
+       BT_SCO_STATE_LOOPBACK,
 };
 
 enum bt_sco_direct {
@@ -486,7 +487,8 @@ static irqreturn_t mtk_btcvsd_snd_irq_handler(int irq_id, void *dev)
        if (bt->rx->state != BT_SCO_STATE_RUNNING &&
            bt->rx->state != BT_SCO_STATE_ENDING &&
            bt->tx->state != BT_SCO_STATE_RUNNING &&
-           bt->tx->state != BT_SCO_STATE_ENDING) {
+           bt->tx->state != BT_SCO_STATE_ENDING &&
+           bt->tx->state != BT_SCO_STATE_LOOPBACK) {
                dev_warn(bt->dev, "%s(), in idle state: rx->state: %d, tx->state: %d\n",
                         __func__, bt->rx->state, bt->tx->state);
                goto irq_handler_exit;
@@ -512,6 +514,42 @@ static irqreturn_t mtk_btcvsd_snd_irq_handler(int irq_id, void *dev)
        buf_cnt_tx = btsco_packet_info[packet_type][2];
        buf_cnt_rx = btsco_packet_info[packet_type][3];
 
+       if (bt->tx->state == BT_SCO_STATE_LOOPBACK) {
+               u8 *src, *dst;
+               unsigned long connsys_addr_rx, ap_addr_rx;
+               unsigned long connsys_addr_tx, ap_addr_tx;
+
+               connsys_addr_rx = *bt->bt_reg_pkt_r;
+               ap_addr_rx = (unsigned long)bt->bt_sram_bank2_base +
+                            (connsys_addr_rx & 0xFFFF);
+
+               connsys_addr_tx = *bt->bt_reg_pkt_w;
+               ap_addr_tx = (unsigned long)bt->bt_sram_bank2_base +
+                            (connsys_addr_tx & 0xFFFF);
+
+               if (connsys_addr_tx == 0xdeadfeed ||
+                   connsys_addr_rx == 0xdeadfeed) {
+                       /* BT returns 0xdeadfeed if a reg is read during BT sleep */
+                       dev_warn(bt->dev, "%s(), connsys_addr_tx == 0xdeadfeed\n",
+                                __func__);
+                       goto irq_handler_exit;
+               }
+
+               src = (u8 *)ap_addr_rx;
+               dst = (u8 *)ap_addr_tx;
+
+               mtk_btcvsd_snd_data_transfer(BT_SCO_DIRECT_BT2ARM, src,
+                                            bt->tx->temp_packet_buf,
+                                            packet_length,
+                                            packet_num);
+               mtk_btcvsd_snd_data_transfer(BT_SCO_DIRECT_ARM2BT,
+                                            bt->tx->temp_packet_buf, dst,
+                                            packet_length,
+                                            packet_num);
+               bt->rx->rw_cnt++;
+               bt->tx->rw_cnt++;
+       }
+
        if (bt->rx->state == BT_SCO_STATE_RUNNING ||
            bt->rx->state == BT_SCO_STATE_ENDING) {
                if (bt->rx->xrun) {
@@ -1067,6 +1105,33 @@ static int btcvsd_band_set(struct snd_kcontrol *kcontrol,
        return 0;
 }
 
+static int btcvsd_loopback_get(struct snd_kcontrol *kcontrol,
+                              struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+       struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(cmpnt);
+       bool lpbk_en = bt->tx->state == BT_SCO_STATE_LOOPBACK;
+
+       ucontrol->value.integer.value[0] = lpbk_en;
+       return 0;
+}
+
+static int btcvsd_loopback_set(struct snd_kcontrol *kcontrol,
+                              struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+       struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(cmpnt);
+
+       if (ucontrol->value.integer.value[0]) {
+               mtk_btcvsd_snd_set_state(bt, bt->tx, BT_SCO_STATE_LOOPBACK);
+               mtk_btcvsd_snd_set_state(bt, bt->rx, BT_SCO_STATE_LOOPBACK);
+       } else {
+               mtk_btcvsd_snd_set_state(bt, bt->tx, BT_SCO_STATE_RUNNING);
+               mtk_btcvsd_snd_set_state(bt, bt->rx, BT_SCO_STATE_RUNNING);
+       }
+       return 0;
+}
+
 static int btcvsd_tx_mute_get(struct snd_kcontrol *kcontrol,
                              struct snd_ctl_elem_value *ucontrol)
 {
@@ -1202,6 +1267,8 @@ static int btcvsd_tx_timestamp_get(struct snd_kcontrol *kcontrol,
 static const struct snd_kcontrol_new mtk_btcvsd_snd_controls[] = {
        SOC_ENUM_EXT("BTCVSD Band", btcvsd_enum[0],
                     btcvsd_band_get, btcvsd_band_set),
+       SOC_SINGLE_BOOL_EXT("BTCVSD Loopback Switch", 0,
+                           btcvsd_loopback_get, btcvsd_loopback_set),
        SOC_SINGLE_BOOL_EXT("BTCVSD Tx Mute Switch", 0,
                            btcvsd_tx_mute_get, btcvsd_tx_mute_set),
        SOC_SINGLE_BOOL_EXT("BTCVSD Tx Irq Received Switch", 0,
index f523ad103acc4cc9ce2bd745a24aa6f5988cc7bf..48e81c5d52fc27959d8b8215a1903764bf302fdd 100644 (file)
@@ -605,6 +605,10 @@ void mt8183_mck_disable(struct mtk_base_afe *afe, int mck_id)
        int m_sel_id = mck_div[mck_id].m_sel_id;
        int div_clk_id = mck_div[mck_id].div_clk_id;
 
+       /* i2s5 mck is not supported */
+       if (mck_id == MT8183_I2S5_MCK)
+               return;
+
        clk_disable_unprepare(afe_priv->clk[div_clk_id]);
        if (m_sel_id >= 0)
                clk_disable_unprepare(afe_priv->clk[m_sel_id]);
index 400e29edb1c9c4db4d3afbbad2b6a41952100a22..d0b403a0e27b830bc480935fa75df123c3375302 100644 (file)
@@ -24,7 +24,7 @@
 
 #include "rockchip_pdm.h"
 
-#define PDM_DMA_BURST_SIZE     (16) /* size * width: 16*4 = 64 bytes */
+#define PDM_DMA_BURST_SIZE     (8) /* size * width: 8*4 = 32 bytes */
 
 struct rk_pdm_dev {
        struct device *dev;
@@ -208,7 +208,9 @@ static int rockchip_pdm_set_fmt(struct snd_soc_dai *cpu_dai,
                return -EINVAL;
        }
 
+       pm_runtime_get_sync(cpu_dai->dev);
        regmap_update_bits(pdm->regmap, PDM_CLK_CTRL, mask, val);
+       pm_runtime_put(cpu_dai->dev);
 
        return 0;
 }
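
A sketch of the runtime-PM bracket added around the register write above: without the get/put pair the controller may be runtime-suspended (clocks gated) when set_fmt runs, and the write would be lost. The PDM_CLK_CTRL register name is taken from the hunk:

	#include <linux/pm_runtime.h>
	#include <linux/regmap.h>

	static void pdm_update_clk_ctrl(struct device *dev, struct regmap *map,
					unsigned int mask, unsigned int val)
	{
		pm_runtime_get_sync(dev);	/* make sure the block is powered */
		regmap_update_bits(map, PDM_CLK_CTRL, mask, val);
		pm_runtime_put(dev);		/* balance the reference */
	}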
index 4231001226f494da587a7096d27c001802fcdf9c..ab471d550d17adf682d0c5c26607e6de85885791 100644 (file)
@@ -1130,11 +1130,11 @@ static const struct snd_soc_dapm_widget samsung_i2s_widgets[] = {
 };
 
 static const struct snd_soc_dapm_route samsung_i2s_dapm_routes[] = {
-       { "Playback Mixer", NULL, "Primary" },
-       { "Playback Mixer", NULL, "Secondary" },
+       { "Playback Mixer", NULL, "Primary Playback" },
+       { "Playback Mixer", NULL, "Secondary Playback" },
 
        { "Mixer DAI TX", NULL, "Playback Mixer" },
-       { "Playback Mixer", NULL, "Mixer DAI RX" },
+       { "Primary Capture", NULL, "Mixer DAI RX" },
 };
 
 static const struct snd_soc_component_driver samsung_i2s_component = {
@@ -1155,7 +1155,8 @@ static int i2s_alloc_dais(struct samsung_i2s_priv *priv,
                          int num_dais)
 {
        static const char *dai_names[] = { "samsung-i2s", "samsung-i2s-sec" };
-       static const char *stream_names[] = { "Primary", "Secondary" };
+       static const char *stream_names[] = { "Primary Playback",
+                                             "Secondary Playback" };
        struct snd_soc_dai_driver *dai_drv;
        struct i2s_dai *dai;
        int i;
@@ -1201,6 +1202,7 @@ static int i2s_alloc_dais(struct samsung_i2s_priv *priv,
        dai_drv->capture.channels_max = 2;
        dai_drv->capture.rates = i2s_dai_data->pcm_rates;
        dai_drv->capture.formats = SAMSUNG_I2S_FMTS;
+       dai_drv->capture.stream_name = "Primary Capture";
 
        return 0;
 }
index 694512f980fdc207577fcd2a5a59d7a6484bfdd0..1dc54c4206f0adc1ed5250c2d9bf3f44f5c7adf2 100644 (file)
@@ -91,11 +91,11 @@ static int odroid_card_be_hw_params(struct snd_pcm_substream *substream,
                return ret;
 
        /*
-        *  We add 1 to the rclk_freq value in order to avoid too low clock
+        *  We add 2 to the rclk_freq value in order to avoid too low clock
         *  frequency values due to the EPLL output frequency not being an
         *  exact multiple of the audio sampling rate.
         */
-       rclk_freq = params_rate(params) * rfs + 1;
+       rclk_freq = params_rate(params) * rfs + 2;
 
        ret = clk_set_rate(priv->sclk_i2s, rclk_freq);
        if (ret < 0)
index 022996d2db1301d16619ef8ead029a19fce1b75b..4fe83e611c01e0d983e5af5c79594d4c858ec610 100644 (file)
@@ -110,6 +110,8 @@ static const struct of_device_id rsnd_of_match[] = {
        { .compatible = "renesas,rcar_sound-gen1", .data = (void *)RSND_GEN1 },
        { .compatible = "renesas,rcar_sound-gen2", .data = (void *)RSND_GEN2 },
        { .compatible = "renesas,rcar_sound-gen3", .data = (void *)RSND_GEN3 },
+       /* Special Handling */
+       { .compatible = "renesas,rcar_sound-r8a77990", .data = (void *)(RSND_GEN3 | RSND_SOC_E) },
        {},
 };
 MODULE_DEVICE_TABLE(of, rsnd_of_match);
index 90625c57847b51281c5b38f81f8ab062a70a3785..0e6ef4e1840021d00c94089ca8522c956d4bb813 100644 (file)
@@ -607,6 +607,8 @@ struct rsnd_priv {
 #define RSND_GEN1      (1 << 0)
 #define RSND_GEN2      (2 << 0)
 #define RSND_GEN3      (3 << 0)
+#define RSND_SOC_MASK  (0xFF << 4)
+#define RSND_SOC_E     (1 << 4) /* E1/E2/E3 */
 
        /*
         * below value will be filled on rsnd_gen_probe()
@@ -679,6 +681,9 @@ struct rsnd_priv {
 #define rsnd_is_gen1(priv)     (((priv)->flags & RSND_GEN_MASK) == RSND_GEN1)
 #define rsnd_is_gen2(priv)     (((priv)->flags & RSND_GEN_MASK) == RSND_GEN2)
 #define rsnd_is_gen3(priv)     (((priv)->flags & RSND_GEN_MASK) == RSND_GEN3)
+#define rsnd_is_e3(priv)       (((priv)->flags & \
+                                       (RSND_GEN_MASK | RSND_SOC_MASK)) == \
+                                       (RSND_GEN3 | RSND_SOC_E))
 
 #define rsnd_flags_has(p, f) ((p)->flags & (f))
 #define rsnd_flags_set(p, f) ((p)->flags |= (f))
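
With this layout the generation lives in the low bits and the SoC variant in bits 4-11, so rsnd_is_e3() has to match both fields at once (RSND_GEN_MASK is defined elsewhere in this header). Roughly:

	static bool flags_are_e3(u32 flags)
	{
		/* true only when both the generation and the SoC variant match */
		return (flags & (RSND_GEN_MASK | RSND_SOC_MASK))
				== (RSND_GEN3 | RSND_SOC_E);
	}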
index db81e066b92ef98902d4433e939b25ab0a635625..585ffba0244b9f568685ed52323d31f7f9fcb3b4 100644 (file)
@@ -14,7 +14,6 @@
  */
 
 #include "rsnd.h"
-#include <linux/sys_soc.h>
 
 #define SRC_NAME "src"
 
@@ -135,7 +134,7 @@ unsigned int rsnd_src_get_rate(struct rsnd_priv *priv,
        return rate;
 }
 
-const static u32 bsdsr_table_pattern1[] = {
+static const u32 bsdsr_table_pattern1[] = {
        0x01800000, /* 6 - 1/6 */
        0x01000000, /* 6 - 1/4 */
        0x00c00000, /* 6 - 1/3 */
@@ -144,7 +143,7 @@ const static u32 bsdsr_table_pattern1[] = {
        0x00400000, /* 6 - 1   */
 };
 
-const static u32 bsdsr_table_pattern2[] = {
+static const u32 bsdsr_table_pattern2[] = {
        0x02400000, /* 6 - 1/6 */
        0x01800000, /* 6 - 1/4 */
        0x01200000, /* 6 - 1/3 */
@@ -153,7 +152,7 @@ const static u32 bsdsr_table_pattern2[] = {
        0x00600000, /* 6 - 1   */
 };
 
-const static u32 bsisr_table[] = {
+static const u32 bsisr_table[] = {
        0x00100060, /* 6 - 1/6 */
        0x00100040, /* 6 - 1/4 */
        0x00100030, /* 6 - 1/3 */
@@ -162,7 +161,7 @@ const static u32 bsisr_table[] = {
        0x00100020, /* 6 - 1   */
 };
 
-const static u32 chan288888[] = {
+static const u32 chan288888[] = {
        0x00000006, /* 1 to 2 */
        0x000001fe, /* 1 to 8 */
        0x000001fe, /* 1 to 8 */
@@ -171,7 +170,7 @@ const static u32 chan288888[] = {
        0x000001fe, /* 1 to 8 */
 };
 
-const static u32 chan244888[] = {
+static const u32 chan244888[] = {
        0x00000006, /* 1 to 2 */
        0x0000001e, /* 1 to 4 */
        0x0000001e, /* 1 to 4 */
@@ -180,7 +179,7 @@ const static u32 chan244888[] = {
        0x000001fe, /* 1 to 8 */
 };
 
-const static u32 chan222222[] = {
+static const u32 chan222222[] = {
        0x00000006, /* 1 to 2 */
        0x00000006, /* 1 to 2 */
        0x00000006, /* 1 to 2 */
@@ -189,18 +188,12 @@ const static u32 chan222222[] = {
        0x00000006, /* 1 to 2 */
 };
 
-static const struct soc_device_attribute ov_soc[] = {
-       { .soc_id = "r8a77990" }, /* E3 */
-       { /* sentinel */ }
-};
-
 static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
                                      struct rsnd_mod *mod)
 {
        struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
        struct device *dev = rsnd_priv_to_dev(priv);
        struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
-       const struct soc_device_attribute *soc = soc_device_match(ov_soc);
        int is_play = rsnd_io_is_play(io);
        int use_src = 0;
        u32 fin, fout;
@@ -307,7 +300,7 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
        /*
         * E3 needs to overwrite
         */
-       if (soc)
+       if (rsnd_is_e3(priv))
                switch (rsnd_mod_id(mod)) {
                case 0:
                case 4:
index 93d316d5bf8e3cac63d9955cfa9e0a6b7bbbb9d0..46e3ab0fced47342be93c32f6509a79829967281 100644 (file)
@@ -947,7 +947,7 @@ static void soc_cleanup_component(struct snd_soc_component *component)
        snd_soc_dapm_free(snd_soc_component_get_dapm(component));
        soc_cleanup_component_debugfs(component);
        component->card = NULL;
-       if (!component->driver->ignore_module_refcount)
+       if (!component->driver->module_get_upon_open)
                module_put(component->dev->driver->owner);
 }
 
@@ -1381,7 +1381,7 @@ static int soc_probe_component(struct snd_soc_card *card,
                return 0;
        }
 
-       if (!component->driver->ignore_module_refcount &&
+       if (!component->driver->module_get_upon_open &&
            !try_module_get(component->dev->driver->owner))
                return -ENODEV;
 
@@ -2797,6 +2797,7 @@ int snd_soc_register_card(struct snd_soc_card *card)
 
                ret = soc_init_dai_link(card, link);
                if (ret) {
+                       soc_cleanup_platform(card);
                        dev_err(card->dev, "ASoC: failed to init link %s\n",
                                link->name);
                        mutex_unlock(&client_mutex);
@@ -2819,6 +2820,7 @@ int snd_soc_register_card(struct snd_soc_card *card)
        card->instantiated = 0;
        mutex_init(&card->mutex);
        mutex_init(&card->dapm_mutex);
+       spin_lock_init(&card->dpcm_lock);
 
        return snd_soc_bind_card(card);
 }
index 1ec06ef6d161606922b1a3f8d16e2e685129db62..0382a47b30bd8182d40340c839268233cd47e21b 100644 (file)
@@ -3650,6 +3650,13 @@ snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
        case snd_soc_dapm_dac:
        case snd_soc_dapm_aif_in:
        case snd_soc_dapm_pga:
+       case snd_soc_dapm_buffer:
+       case snd_soc_dapm_scheduler:
+       case snd_soc_dapm_effect:
+       case snd_soc_dapm_src:
+       case snd_soc_dapm_asrc:
+       case snd_soc_dapm_encoder:
+       case snd_soc_dapm_decoder:
        case snd_soc_dapm_out_drv:
        case snd_soc_dapm_micbias:
        case snd_soc_dapm_line:
@@ -3957,6 +3964,10 @@ snd_soc_dapm_free_kcontrol(struct snd_soc_card *card,
        int count;
 
        devm_kfree(card->dev, (void *)*private_value);
+
+       if (!w_param_text)
+               return;
+
        for (count = 0 ; count < num_params; count++)
                devm_kfree(card->dev, (void *)w_param_text[count]);
        devm_kfree(card->dev, w_param_text);
index 0d5ec68a1e50869e00ea6abb13b229c79b903329..be80a12fba27cc381b0438a95bb4987c6843cb39 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/delay.h>
 #include <linux/pinctrl/consumer.h>
 #include <linux/pm_runtime.h>
+#include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/workqueue.h>
 #include <linux/export.h>
@@ -463,6 +464,9 @@ static int soc_pcm_components_close(struct snd_pcm_substream *substream,
                        continue;
 
                component->driver->ops->close(substream);
+
+               if (component->driver->module_get_upon_open)
+                       module_put(component->dev->driver->owner);
        }
 
        return 0;
@@ -513,6 +517,12 @@ static int soc_pcm_open(struct snd_pcm_substream *substream)
                    !component->driver->ops->open)
                        continue;
 
+               if (component->driver->module_get_upon_open &&
+                   !try_module_get(component->dev->driver->owner)) {
+                       ret = -ENODEV;
+                       goto module_err;
+               }
+
                ret = component->driver->ops->open(substream);
                if (ret < 0) {
                        dev_err(component->dev,
@@ -628,7 +638,7 @@ static int soc_pcm_open(struct snd_pcm_substream *substream)
 
 component_err:
        soc_pcm_components_close(substream, component);
-
+module_err:
        if (cpu_dai->driver->ops->shutdown)
                cpu_dai->driver->ops->shutdown(substream, cpu_dai);
 out:
@@ -954,10 +964,13 @@ static int soc_pcm_hw_params(struct snd_pcm_substream *substream,
                codec_params = *params;
 
                /* fixup params based on TDM slot masks */
-               if (codec_dai->tx_mask)
+               if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
+                   codec_dai->tx_mask)
                        soc_pcm_codec_params_fixup(&codec_params,
                                                   codec_dai->tx_mask);
-               if (codec_dai->rx_mask)
+
+               if (substream->stream == SNDRV_PCM_STREAM_CAPTURE &&
+                   codec_dai->rx_mask)
                        soc_pcm_codec_params_fixup(&codec_params,
                                                   codec_dai->rx_mask);
 
@@ -1213,6 +1226,7 @@ static int dpcm_be_connect(struct snd_soc_pcm_runtime *fe,
                struct snd_soc_pcm_runtime *be, int stream)
 {
        struct snd_soc_dpcm *dpcm;
+       unsigned long flags;
 
        /* only add new dpcms */
        for_each_dpcm_be(fe, stream, dpcm) {
@@ -1228,8 +1242,10 @@ static int dpcm_be_connect(struct snd_soc_pcm_runtime *fe,
        dpcm->fe = fe;
        be->dpcm[stream].runtime = fe->dpcm[stream].runtime;
        dpcm->state = SND_SOC_DPCM_LINK_STATE_NEW;
+       spin_lock_irqsave(&fe->card->dpcm_lock, flags);
        list_add(&dpcm->list_be, &fe->dpcm[stream].be_clients);
        list_add(&dpcm->list_fe, &be->dpcm[stream].fe_clients);
+       spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
 
        dev_dbg(fe->dev, "connected new DPCM %s path %s %s %s\n",
                        stream ? "capture" : "playback",  fe->dai_link->name,
@@ -1275,6 +1291,7 @@ static void dpcm_be_reparent(struct snd_soc_pcm_runtime *fe,
 void dpcm_be_disconnect(struct snd_soc_pcm_runtime *fe, int stream)
 {
        struct snd_soc_dpcm *dpcm, *d;
+       unsigned long flags;
 
        for_each_dpcm_be_safe(fe, stream, dpcm, d) {
                dev_dbg(fe->dev, "ASoC: BE %s disconnect check for %s\n",
@@ -1294,8 +1311,10 @@ void dpcm_be_disconnect(struct snd_soc_pcm_runtime *fe, int stream)
 #ifdef CONFIG_DEBUG_FS
                debugfs_remove(dpcm->debugfs_state);
 #endif
+               spin_lock_irqsave(&fe->card->dpcm_lock, flags);
                list_del(&dpcm->list_be);
                list_del(&dpcm->list_fe);
+               spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
                kfree(dpcm);
        }
 }
@@ -1547,10 +1566,13 @@ int dpcm_process_paths(struct snd_soc_pcm_runtime *fe,
 void dpcm_clear_pending_state(struct snd_soc_pcm_runtime *fe, int stream)
 {
        struct snd_soc_dpcm *dpcm;
+       unsigned long flags;
 
+       spin_lock_irqsave(&fe->card->dpcm_lock, flags);
        for_each_dpcm_be(fe, stream, dpcm)
                dpcm->be->dpcm[stream].runtime_update =
                                                SND_SOC_DPCM_UPDATE_NO;
+       spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
 }
 
 static void dpcm_be_dai_startup_unwind(struct snd_soc_pcm_runtime *fe,
@@ -1899,10 +1921,15 @@ static int dpcm_apply_symmetry(struct snd_pcm_substream *fe_substream,
                struct snd_soc_pcm_runtime *be = dpcm->be;
                struct snd_pcm_substream *be_substream =
                        snd_soc_dpcm_get_substream(be, stream);
-               struct snd_soc_pcm_runtime *rtd = be_substream->private_data;
+               struct snd_soc_pcm_runtime *rtd;
                struct snd_soc_dai *codec_dai;
                int i;
 
+               /* A backend may not have the requested substream */
+               if (!be_substream)
+                       continue;
+
+               rtd = be_substream->private_data;
                if (rtd->dai_link->be_hw_params_fixup)
                        continue;
 
@@ -2571,6 +2598,7 @@ static int dpcm_run_update_startup(struct snd_soc_pcm_runtime *fe, int stream)
        struct snd_soc_dpcm *dpcm;
        enum snd_soc_dpcm_trigger trigger = fe->dai_link->trigger[stream];
        int ret;
+       unsigned long flags;
 
        dev_dbg(fe->dev, "ASoC: runtime %s open on FE %s\n",
                        stream ? "capture" : "playback", fe->dai_link->name);
@@ -2640,11 +2668,13 @@ static int dpcm_run_update_startup(struct snd_soc_pcm_runtime *fe, int stream)
        dpcm_be_dai_shutdown(fe, stream);
 disconnect:
        /* disconnect any non started BEs */
+       spin_lock_irqsave(&fe->card->dpcm_lock, flags);
        for_each_dpcm_be(fe, stream, dpcm) {
                struct snd_soc_pcm_runtime *be = dpcm->be;
                if (be->dpcm[stream].state != SND_SOC_DPCM_STATE_START)
                                dpcm->state = SND_SOC_DPCM_LINK_STATE_FREE;
        }
+       spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
 
        return ret;
 }
@@ -3221,7 +3251,10 @@ int snd_soc_dpcm_can_be_free_stop(struct snd_soc_pcm_runtime *fe,
 {
        struct snd_soc_dpcm *dpcm;
        int state;
+       int ret = 1;
+       unsigned long flags;
 
+       spin_lock_irqsave(&fe->card->dpcm_lock, flags);
        for_each_dpcm_fe(be, stream, dpcm) {
 
                if (dpcm->fe == fe)
@@ -3230,12 +3263,15 @@ int snd_soc_dpcm_can_be_free_stop(struct snd_soc_pcm_runtime *fe,
                state = dpcm->fe->dpcm[stream].state;
                if (state == SND_SOC_DPCM_STATE_START ||
                        state == SND_SOC_DPCM_STATE_PAUSED ||
-                       state == SND_SOC_DPCM_STATE_SUSPEND)
-                       return 0;
+                       state == SND_SOC_DPCM_STATE_SUSPEND) {
+                       ret = 0;
+                       break;
+               }
        }
+       spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
 
        /* it's safe to free/stop this BE DAI */
-       return 1;
+       return ret;
 }
 EXPORT_SYMBOL_GPL(snd_soc_dpcm_can_be_free_stop);
 
@@ -3248,7 +3284,10 @@ int snd_soc_dpcm_can_be_params(struct snd_soc_pcm_runtime *fe,
 {
        struct snd_soc_dpcm *dpcm;
        int state;
+       int ret = 1;
+       unsigned long flags;
 
+       spin_lock_irqsave(&fe->card->dpcm_lock, flags);
        for_each_dpcm_fe(be, stream, dpcm) {
 
                if (dpcm->fe == fe)
@@ -3258,12 +3297,15 @@ int snd_soc_dpcm_can_be_params(struct snd_soc_pcm_runtime *fe,
                if (state == SND_SOC_DPCM_STATE_START ||
                        state == SND_SOC_DPCM_STATE_PAUSED ||
                        state == SND_SOC_DPCM_STATE_SUSPEND ||
-                       state == SND_SOC_DPCM_STATE_PREPARE)
-                       return 0;
+                       state == SND_SOC_DPCM_STATE_PREPARE) {
+                       ret = 0;
+                       break;
+               }
        }
+       spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
 
        /* it's safe to change hw_params */
-       return 1;
+       return ret;
 }
 EXPORT_SYMBOL_GPL(snd_soc_dpcm_can_be_params);
 
@@ -3302,6 +3344,7 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe,
        struct snd_pcm_hw_params *params = &fe->dpcm[stream].hw_params;
        struct snd_soc_dpcm *dpcm;
        ssize_t offset = 0;
+       unsigned long flags;
 
        /* FE state */
        offset += snprintf(buf + offset, size - offset,
@@ -3329,6 +3372,7 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe,
                goto out;
        }
 
+       spin_lock_irqsave(&fe->card->dpcm_lock, flags);
        for_each_dpcm_be(fe, stream, dpcm) {
                struct snd_soc_pcm_runtime *be = dpcm->be;
                params = &dpcm->hw_params;
@@ -3349,7 +3393,7 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe,
                                params_channels(params),
                                params_rate(params));
        }
-
+       spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
 out:
        return offset;
 }
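
The common shape of all the dpcm_lock changes in this file: every walk of the FE/BE client lists now happens under the card spinlock with interrupts disabled, so dpcm_be_disconnect() cannot free a node out from under a concurrent iterator. A sketch with a hypothetical wrapper name:

	static void dpcm_walk_example(struct snd_soc_pcm_runtime *fe, int stream)
	{
		struct snd_soc_dpcm *dpcm;
		unsigned long flags;

		spin_lock_irqsave(&fe->card->dpcm_lock, flags);
		for_each_dpcm_be(fe, stream, dpcm) {
			/* read or update dpcm state; never sleep under the spinlock */
		}
		spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
	}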
index 25fca7055464a894e5e260bf872de8c410136f56..96852d25061936e1f72b9c929686a1a740bdc6a8 100644 (file)
@@ -482,10 +482,11 @@ static void remove_widget(struct snd_soc_component *comp,
 
                        snd_ctl_remove(card, kcontrol);
 
-                       kfree(dobj->control.dvalues);
+                       /* free enum kcontrol's dvalues and dtexts */
+                       kfree(se->dobj.control.dvalues);
                        for (j = 0; j < se->items; j++)
-                               kfree(dobj->control.dtexts[j]);
-                       kfree(dobj->control.dtexts);
+                               kfree(se->dobj.control.dtexts[j]);
+                       kfree(se->dobj.control.dtexts);
 
                        kfree(se);
                        kfree(w->kcontrol_news[i].name);
index 47901983a6ff88706dd847376a42d5aeb2c3a4c6..78bed97347136974d3da6b8a09d3465eaafffbdd 100644 (file)
@@ -9,6 +9,7 @@
 
 #include <linux/clk.h>
 #include <linux/module.h>
+#include <linux/mutex.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 
@@ -37,6 +38,8 @@ struct stm32_adfsdm_priv {
        /* PCM buffer */
        unsigned char *pcm_buff;
        unsigned int pos;
+
+       struct mutex lock; /* protect against race condition on iio state */
 };
 
 static const struct snd_pcm_hardware stm32_adfsdm_pcm_hw = {
@@ -62,10 +65,12 @@ static void stm32_adfsdm_shutdown(struct snd_pcm_substream *substream,
 {
        struct stm32_adfsdm_priv *priv = snd_soc_dai_get_drvdata(dai);
 
+       mutex_lock(&priv->lock);
        if (priv->iio_active) {
                iio_channel_stop_all_cb(priv->iio_cb);
                priv->iio_active = false;
        }
+       mutex_unlock(&priv->lock);
 }
 
 static int stm32_adfsdm_dai_prepare(struct snd_pcm_substream *substream,
@@ -74,13 +79,19 @@ static int stm32_adfsdm_dai_prepare(struct snd_pcm_substream *substream,
        struct stm32_adfsdm_priv *priv = snd_soc_dai_get_drvdata(dai);
        int ret;
 
+       mutex_lock(&priv->lock);
+       if (priv->iio_active) {
+               iio_channel_stop_all_cb(priv->iio_cb);
+               priv->iio_active = false;
+       }
+
        ret = iio_write_channel_attribute(priv->iio_ch,
                                          substream->runtime->rate, 0,
                                          IIO_CHAN_INFO_SAMP_FREQ);
        if (ret < 0) {
                dev_err(dai->dev, "%s: Failed to set %d sampling rate\n",
                        __func__, substream->runtime->rate);
-               return ret;
+               goto out;
        }
 
        if (!priv->iio_active) {
@@ -92,6 +103,9 @@ static int stm32_adfsdm_dai_prepare(struct snd_pcm_substream *substream,
                                __func__, ret);
        }
 
+out:
+       mutex_unlock(&priv->lock);
+
        return ret;
 }
 
@@ -291,6 +305,7 @@ MODULE_DEVICE_TABLE(of, stm32_adfsdm_of_match);
 static int stm32_adfsdm_probe(struct platform_device *pdev)
 {
        struct stm32_adfsdm_priv *priv;
+       struct snd_soc_component *component;
        int ret;
 
        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
@@ -299,6 +314,7 @@ static int stm32_adfsdm_probe(struct platform_device *pdev)
 
        priv->dev = &pdev->dev;
        priv->dai_drv = stm32_adfsdm_dai;
+       mutex_init(&priv->lock);
 
        dev_set_drvdata(&pdev->dev, priv);
 
@@ -317,9 +333,15 @@ static int stm32_adfsdm_probe(struct platform_device *pdev)
        if (IS_ERR(priv->iio_cb))
                return PTR_ERR(priv->iio_cb);
 
-       ret = devm_snd_soc_register_component(&pdev->dev,
-                                             &stm32_adfsdm_soc_platform,
-                                             NULL, 0);
+       component = devm_kzalloc(&pdev->dev, sizeof(*component), GFP_KERNEL);
+       if (!component)
+               return -ENOMEM;
+#ifdef CONFIG_DEBUG_FS
+       component->debugfs_prefix = "pcm";
+#endif
+
+       ret = snd_soc_add_component(&pdev->dev, component,
+                                   &stm32_adfsdm_soc_platform, NULL, 0);
        if (ret < 0)
                dev_err(&pdev->dev, "%s: Failed to register PCM platform\n",
                        __func__);
@@ -327,12 +349,20 @@ static int stm32_adfsdm_probe(struct platform_device *pdev)
        return ret;
 }
 
+static int stm32_adfsdm_remove(struct platform_device *pdev)
+{
+       snd_soc_unregister_component(&pdev->dev);
+
+       return 0;
+}
+
 static struct platform_driver stm32_adfsdm_driver = {
        .driver = {
                   .name = STM32_ADFSDM_DRV_NAME,
                   .of_match_table = stm32_adfsdm_of_match,
                   },
        .probe = stm32_adfsdm_probe,
+       .remove = stm32_adfsdm_remove,
 };
 
 module_platform_driver(stm32_adfsdm_driver);
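
Why the probe above trades devm_snd_soc_register_component() for snd_soc_add_component(): a second component on the same device presumably needs its own debugfs_prefix to avoid clashing with the DAI component's directory, and manual registration then requires the explicit unregister in the new remove() callback. A condensed sketch, with 'dev' and 'drv' standing in for the device and component driver from the hunk:

	static int pcm_component_register(struct device *dev,
					  const struct snd_soc_component_driver *drv)
	{
		struct snd_soc_component *component;

		component = devm_kzalloc(dev, sizeof(*component), GFP_KERNEL);
		if (!component)
			return -ENOMEM;
	#ifdef CONFIG_DEBUG_FS
		component->debugfs_prefix = "pcm";	/* own debugfs directory */
	#endif
		/* paired with snd_soc_unregister_component(dev) in remove() */
		return snd_soc_add_component(dev, component, drv, NULL, 0);
	}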
index 47c334de6b0966a4e5fd4a589e8ce6fd757c1fce..8968458eec62d6b796e2eaae3df508e03c375e6d 100644 (file)
@@ -281,7 +281,6 @@ static bool stm32_i2s_readable_reg(struct device *dev, unsigned int reg)
        case STM32_I2S_CFG2_REG:
        case STM32_I2S_IER_REG:
        case STM32_I2S_SR_REG:
-       case STM32_I2S_TXDR_REG:
        case STM32_I2S_RXDR_REG:
        case STM32_I2S_CGFR_REG:
                return true;
@@ -293,7 +292,7 @@ static bool stm32_i2s_readable_reg(struct device *dev, unsigned int reg)
 static bool stm32_i2s_volatile_reg(struct device *dev, unsigned int reg)
 {
        switch (reg) {
-       case STM32_I2S_TXDR_REG:
+       case STM32_I2S_SR_REG:
        case STM32_I2S_RXDR_REG:
                return true;
        default:
index 14c9591aae4260d94f6d19231aaa56ad436e5469..d68d62f12df56098214a94de25758598c9f7502f 100644 (file)
@@ -105,6 +105,7 @@ static int stm32_sai_set_sync(struct stm32_sai_data *sai_client,
        if (!pdev) {
                dev_err(&sai_client->pdev->dev,
                        "Device not found for node %pOFn\n", np_provider);
+               of_node_put(np_provider);
                return -ENODEV;
        }
 
@@ -113,19 +114,20 @@ static int stm32_sai_set_sync(struct stm32_sai_data *sai_client,
                dev_err(&sai_client->pdev->dev,
                        "SAI sync provider data not found\n");
                ret = -EINVAL;
-               goto out_put_dev;
+               goto error;
        }
 
        /* Configure sync client */
        ret = stm32_sai_sync_conf_client(sai_client, synci);
        if (ret < 0)
-               goto out_put_dev;
+               goto error;
 
        /* Configure sync provider */
        ret = stm32_sai_sync_conf_provider(sai_provider, synco);
 
-out_put_dev:
+error:
        put_device(&pdev->dev);
+       of_node_put(np_provider);
        return ret;
 }
 
index f9297228c41ce4412f86d9ba7010ba714d44d345..d7045aa520de56eb42d108a14e92aee810d15f08 100644 (file)
@@ -70,6 +70,7 @@
 #define SAI_IEC60958_STATUS_BYTES      24
 
 #define SAI_MCLK_NAME_LEN              32
+#define SAI_RATE_11K                   11025
 
 /**
  * struct stm32_sai_sub_data - private data of SAI sub block (block A or B)
  * @slot_mask: rx or tx active slots mask. set at init or at runtime
  * @data_size: PCM data width. corresponds to PCM substream width.
  * @spdif_frm_cnt: S/PDIF playback frame counter
- * @snd_aes_iec958: iec958 data
+ * @iec958: iec958 data
  * @ctrl_lock: control lock
+ * @irq_lock: prevent race condition with IRQ
  */
 struct stm32_sai_sub_data {
        struct platform_device *pdev;
@@ -133,6 +135,7 @@ struct stm32_sai_sub_data {
        unsigned int spdif_frm_cnt;
        struct snd_aes_iec958 iec958;
        struct mutex ctrl_lock; /* protect resources accessed by controls */
+       spinlock_t irq_lock; /* used to prevent race condition with IRQ */
 };
 
 enum stm32_sai_fifo_th {
@@ -307,6 +310,25 @@ static int stm32_sai_set_clk_div(struct stm32_sai_sub_data *sai,
        return ret;
 }
 
+static int stm32_sai_set_parent_clock(struct stm32_sai_sub_data *sai,
+                                     unsigned int rate)
+{
+       struct platform_device *pdev = sai->pdev;
+       struct clk *parent_clk = sai->pdata->clk_x8k;
+       int ret;
+
+       if (!(rate % SAI_RATE_11K))
+               parent_clk = sai->pdata->clk_x11k;
+
+       ret = clk_set_parent(sai->sai_ck, parent_clk);
+       if (ret)
+               dev_err(&pdev->dev, " Error %d setting sai_ck parent clock. %s",
+                       ret, ret == -EBUSY ?
+                       "Active stream rates conflict\n" : "\n");
+
+       return ret;
+}
+
 static long stm32_sai_mclk_round_rate(struct clk_hw *hw, unsigned long rate,
                                      unsigned long *prate)
 {
@@ -474,8 +496,10 @@ static irqreturn_t stm32_sai_isr(int irq, void *devid)
                status = SNDRV_PCM_STATE_XRUN;
        }
 
-       if (status != SNDRV_PCM_STATE_RUNNING)
+       spin_lock(&sai->irq_lock);
+       if (status != SNDRV_PCM_STATE_RUNNING && sai->substream)
                snd_pcm_stop_xrun(sai->substream);
+       spin_unlock(&sai->irq_lock);
 
        return IRQ_HANDLED;
 }
@@ -486,25 +510,29 @@ static int stm32_sai_set_sysclk(struct snd_soc_dai *cpu_dai,
        struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai);
        int ret;
 
-       if (dir == SND_SOC_CLOCK_OUT) {
+       if (dir == SND_SOC_CLOCK_OUT && sai->sai_mclk) {
                ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX,
                                         SAI_XCR1_NODIV,
                                         (unsigned int)~SAI_XCR1_NODIV);
                if (ret < 0)
                        return ret;
 
-               dev_dbg(cpu_dai->dev, "SAI MCLK frequency is %uHz\n", freq);
-               sai->mclk_rate = freq;
+               /* If master clock is used, set parent clock now */
+               ret = stm32_sai_set_parent_clock(sai, freq);
+               if (ret)
+                       return ret;
 
-               if (sai->sai_mclk) {
-                       ret = clk_set_rate_exclusive(sai->sai_mclk,
-                                                    sai->mclk_rate);
-                       if (ret) {
-                               dev_err(cpu_dai->dev,
-                                       "Could not set mclk rate\n");
-                               return ret;
-                       }
+               ret = clk_set_rate_exclusive(sai->sai_mclk, freq);
+               if (ret) {
+                       dev_err(cpu_dai->dev,
+                               ret == -EBUSY ?
+                               "Active streams have incompatible rates" :
+                               "Could not set mclk rate\n");
+                       return ret;
                }
+
+               dev_dbg(cpu_dai->dev, "SAI MCLK frequency is %uHz\n", freq);
+               sai->mclk_rate = freq;
        }
 
        return 0;
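
The parent-clock helper introduced earlier in this file encodes the usual two-family split of audio rates: everything in the 44.1 kHz family (11025, 22050, 44100, 88200, ...) is a multiple of 11025 and takes the x11k kernel clock, while the other standard rates (8k/16k/32k/48k/96k, ...) stay on the x8k clock. In essence, using the field names from the hunk:

	static struct clk *pick_parent(struct stm32_sai_data *pdata, unsigned int rate)
	{
		/* the 44.1 kHz family divides evenly by 11025; others do not */
		return (rate % SAI_RATE_11K) ? pdata->clk_x8k : pdata->clk_x11k;
	}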
@@ -679,8 +707,19 @@ static int stm32_sai_startup(struct snd_pcm_substream *substream,
 {
        struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai);
        int imr, cr2, ret;
+       unsigned long flags;
 
+       spin_lock_irqsave(&sai->irq_lock, flags);
        sai->substream = substream;
+       spin_unlock_irqrestore(&sai->irq_lock, flags);
+
+       if (STM_SAI_PROTOCOL_IS_SPDIF(sai)) {
+               snd_pcm_hw_constraint_mask64(substream->runtime,
+                                            SNDRV_PCM_HW_PARAM_FORMAT,
+                                            SNDRV_PCM_FMTBIT_S32_LE);
+               snd_pcm_hw_constraint_single(substream->runtime,
+                                            SNDRV_PCM_HW_PARAM_CHANNELS, 2);
+       }
 
        ret = clk_prepare_enable(sai->sai_ck);
        if (ret < 0) {
@@ -898,14 +937,16 @@ static int stm32_sai_configure_clock(struct snd_soc_dai *cpu_dai,
                                     struct snd_pcm_hw_params *params)
 {
        struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai);
-       int div = 0;
+       int div = 0, cr1 = 0;
        int sai_clk_rate, mclk_ratio, den;
        unsigned int rate = params_rate(params);
+       int ret;
 
-       if (!(rate % 11025))
-               clk_set_parent(sai->sai_ck, sai->pdata->clk_x11k);
-       else
-               clk_set_parent(sai->sai_ck, sai->pdata->clk_x8k);
+       if (!sai->sai_mclk) {
+               ret = stm32_sai_set_parent_clock(sai, rate);
+               if (ret)
+                       return ret;
+       }
        sai_clk_rate = clk_get_rate(sai->sai_ck);
 
        if (STM_SAI_IS_F4(sai->pdata)) {
@@ -943,13 +984,19 @@ static int stm32_sai_configure_clock(struct snd_soc_dai *cpu_dai,
                } else {
                        if (sai->mclk_rate) {
                                mclk_ratio = sai->mclk_rate / rate;
-                               if ((mclk_ratio != 512) &&
-                                   (mclk_ratio != 256)) {
+                               if (mclk_ratio == 512) {
+                                       cr1 = SAI_XCR1_OSR;
+                               } else if (mclk_ratio != 256) {
                                        dev_err(cpu_dai->dev,
                                                "Wrong mclk ratio %d\n",
                                                mclk_ratio);
                                        return -EINVAL;
                                }
+
+                               regmap_update_bits(sai->regmap,
+                                                  STM_SAI_CR1_REGX,
+                                                  SAI_XCR1_OSR, cr1);
+
                                div = stm32_sai_get_clk_div(sai, sai_clk_rate,
                                                            sai->mclk_rate);
                                if (div < 0)
@@ -1051,28 +1098,36 @@ static void stm32_sai_shutdown(struct snd_pcm_substream *substream,
                               struct snd_soc_dai *cpu_dai)
 {
        struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai);
+       unsigned long flags;
 
        regmap_update_bits(sai->regmap, STM_SAI_IMR_REGX, SAI_XIMR_MASK, 0);
 
        regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, SAI_XCR1_NODIV,
                           SAI_XCR1_NODIV);
 
-       clk_disable_unprepare(sai->sai_ck);
+       /* Release mclk rate only if rate was actually set */
+       if (sai->mclk_rate) {
+               clk_rate_exclusive_put(sai->sai_mclk);
+               sai->mclk_rate = 0;
+       }
 
-       clk_rate_exclusive_put(sai->sai_mclk);
+       clk_disable_unprepare(sai->sai_ck);
 
+       spin_lock_irqsave(&sai->irq_lock, flags);
        sai->substream = NULL;
+       spin_unlock_irqrestore(&sai->irq_lock, flags);
 }
 
 static int stm32_sai_pcm_new(struct snd_soc_pcm_runtime *rtd,
                             struct snd_soc_dai *cpu_dai)
 {
        struct stm32_sai_sub_data *sai = dev_get_drvdata(cpu_dai->dev);
+       struct snd_kcontrol_new knew = iec958_ctls;
 
        if (STM_SAI_PROTOCOL_IS_SPDIF(sai)) {
                dev_dbg(&sai->pdev->dev, "%s: register iec controls", __func__);
-               return snd_ctl_add(rtd->pcm->card,
-                                  snd_ctl_new1(&iec958_ctls, sai));
+               knew.device = rtd->pcm->device;
+               return snd_ctl_add(rtd->pcm->card, snd_ctl_new1(&knew, sai));
        }
 
        return 0;
@@ -1081,7 +1136,7 @@ static int stm32_sai_pcm_new(struct snd_soc_pcm_runtime *rtd,
 static int stm32_sai_dai_probe(struct snd_soc_dai *cpu_dai)
 {
        struct stm32_sai_sub_data *sai = dev_get_drvdata(cpu_dai->dev);
-       int cr1 = 0, cr1_mask;
+       int cr1 = 0, cr1_mask, ret;
 
        sai->cpu_dai = cpu_dai;
 
@@ -1111,8 +1166,10 @@ static int stm32_sai_dai_probe(struct snd_soc_dai *cpu_dai)
        /* Configure synchronization */
        if (sai->sync == SAI_SYNC_EXTERNAL) {
                /* Configure synchro client and provider */
-               sai->pdata->set_sync(sai->pdata, sai->np_sync_provider,
-                                    sai->synco, sai->synci);
+               ret = sai->pdata->set_sync(sai->pdata, sai->np_sync_provider,
+                                          sai->synco, sai->synci);
+               if (ret)
+                       return ret;
        }
 
        cr1_mask |= SAI_XCR1_SYNCEN_MASK;
@@ -1392,7 +1449,6 @@ static int stm32_sai_sub_dais_init(struct platform_device *pdev,
        if (!sai->cpu_dai_drv)
                return -ENOMEM;
 
-       sai->cpu_dai_drv->name = dev_name(&pdev->dev);
        if (STM_SAI_IS_PLAYBACK(sai)) {
                memcpy(sai->cpu_dai_drv, &stm32_sai_playback_dai,
                       sizeof(stm32_sai_playback_dai));
@@ -1402,6 +1458,7 @@ static int stm32_sai_sub_dais_init(struct platform_device *pdev,
                       sizeof(stm32_sai_capture_dai));
                sai->cpu_dai_drv->capture.stream_name = sai->cpu_dai_drv->name;
        }
+       sai->cpu_dai_drv->name = dev_name(&pdev->dev);
 
        return 0;
 }
@@ -1424,6 +1481,7 @@ static int stm32_sai_sub_probe(struct platform_device *pdev)
 
        sai->pdev = pdev;
        mutex_init(&sai->ctrl_lock);
+       spin_lock_init(&sai->irq_lock);
        platform_set_drvdata(pdev, sai);
 
        sai->pdata = dev_get_drvdata(pdev->dev.parent);
index a7f413cb704dc7154c42c5adf29ed242d632cfc2..b14ab512c2ce0d4ef2aceae5d673e6b528e1120c 100644 (file)
@@ -441,7 +441,7 @@ static int shbuf_setup_backstore(struct xen_snd_front_pcm_stream_info *stream,
 {
        int i;
 
-       stream->buffer = alloc_pages_exact(stream->buffer_sz, GFP_KERNEL);
+       stream->buffer = alloc_pages_exact(buffer_sz, GFP_KERNEL);
        if (!stream->buffer)
                return -ENOMEM;
 
index c317d3e6867a3770e0e3f8e4ed679172adef1d71..ea6a255ae61f7268ada806820ecafb258ca6b0eb 100644 (file)
@@ -27,8 +27,6 @@
 #define MAP_NONBLOCK   0x40000
 #define MAP_NORESERVE  0x10000
 #define MAP_POPULATE   0x20000
-#define MAP_PRIVATE    0x02
-#define MAP_SHARED     0x01
 #define MAP_STACK      0x80000
 #define PROT_EXEC      0x4
 #define PROT_GROWSDOWN 0x01000000
index 378c051fa1776534b0e1fca7e56567f9f8a3d2d4..3b9b41331c4f16ec4339ab20e32d434d89336c37 100644 (file)
 #define wmb()          asm volatile("dmb ishst" ::: "memory")
 #define rmb()          asm volatile("dmb ishld" ::: "memory")
 
+/*
+ * The kernel uses dmb variants on arm64 for the smp_*() barriers. These are
+ * pretty much the same as the mb()/wmb()/rmb() implementations above, except
+ * that in the kernel proper the latter use dsb. In any case, should the
+ * mb()/wmb()/rmb() above ever change, make sure the smp_*() below don't.
+ */
+#define smp_mb()       asm volatile("dmb ish" ::: "memory")
+#define smp_wmb()      asm volatile("dmb ishst" ::: "memory")
+#define smp_rmb()      asm volatile("dmb ishld" ::: "memory")
+
 #define smp_store_release(p, v)                                                \
 do {                                                                   \
        union { typeof(*p) __val; char __c[1]; } __u =                  \
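
These smp_*() additions pair in the usual publish/consume fashion. A minimal sketch of how they might be exercised (not part of the patch; READ_ONCE()/WRITE_ONCE() are assumed from tools/include/linux/compiler.h):

    static int data;
    static int ready;

    static void publish(int v)
    {
            WRITE_ONCE(data, v);
            smp_wmb();              /* order data store before flag store */
            WRITE_ONCE(ready, 1);
    }

    static int consume(void)
    {
            while (!READ_ONCE(ready))
                    ;               /* spin until the flag is observed */
            smp_rmb();              /* order flag load before data load */
            return READ_ONCE(data);
    }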
index de2206883abc0f83f21e59f9f7edf17e4ce2b85a..c8acaa138d466934a9d9adfd7934ca41da5e3279 100644 (file)
@@ -28,8 +28,6 @@
 #define MAP_NONBLOCK   0x20000
 #define MAP_NORESERVE  0x0400
 #define MAP_POPULATE   0x10000
-#define MAP_PRIVATE    0x002
-#define MAP_SHARED     0x001
 #define MAP_STACK      0x40000
 #define PROT_EXEC      0x04
 #define PROT_GROWSDOWN 0x01000000
index 1bd78758bde9815b067d705fdec7157ca3860b34..f9fd1325f5bda746d3393857f4603ae46d7e845e 100644 (file)
@@ -27,8 +27,6 @@
 #define MAP_NONBLOCK   0x20000
 #define MAP_NORESERVE  0x4000
 #define MAP_POPULATE   0x10000
-#define MAP_PRIVATE    0x02
-#define MAP_SHARED     0x01
 #define MAP_STACK      0x40000
 #define PROT_EXEC      0x4
 #define PROT_GROWSDOWN 0x01000000
index 8c876c166ef27b2c6fa754781fdbb103f2addc54..26ca425f4c2c39515bccee31029b3cada4c73639 100644 (file)
@@ -463,10 +463,12 @@ struct kvm_ppc_cpu_char {
 #define KVM_PPC_CPU_CHAR_BR_HINT_HONOURED      (1ULL << 58)
 #define KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF     (1ULL << 57)
 #define KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS       (1ULL << 56)
+#define KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST    (1ull << 54)
 
 #define KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY      (1ULL << 63)
 #define KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR         (1ULL << 62)
 #define KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR    (1ULL << 61)
+#define KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE    (1ull << 58)
 
 /* Per-vcpu XICS interrupt controller state */
 #define KVM_REG_PPC_ICP_STATE  (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x8c)
index 58919868473c134f63a2ad42bd64bac8ac46fabd..0adf295dd5b6aa9e8639d1956f481827a61fd2ca 100644 (file)
 #define rmb()  asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
 #define wmb()  asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
 #elif defined(__x86_64__)
-#define mb()   asm volatile("mfence":::"memory")
-#define rmb()  asm volatile("lfence":::"memory")
+#define mb()   asm volatile("mfence" ::: "memory")
+#define rmb()  asm volatile("lfence" ::: "memory")
 #define wmb()  asm volatile("sfence" ::: "memory")
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#define smp_mb()  asm volatile("lock; addl $0,-132(%%rsp)" ::: "memory", "cc")
 #endif
 
 #if defined(__x86_64__)
index 6d612252471143ee2fa850e6b3c1f13456426afe..981ff94796484426911c41e333b82ef395380caa 100644 (file)
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
 #define X86_FEATURE_AVX512_4VNNIW      (18*32+ 2) /* AVX-512 Neural Network Instructions */
 #define X86_FEATURE_AVX512_4FMAPS      (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
+#define X86_FEATURE_TSX_FORCE_ABORT    (18*32+13) /* "" TSX_FORCE_ABORT */
 #define X86_FEATURE_PCONFIG            (18*32+18) /* Intel PCONFIG */
 #define X86_FEATURE_SPEC_CTRL          (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
 #define X86_FEATURE_INTEL_STIBP                (18*32+27) /* "" Single Thread Indirect Branch Predictors */
index 34dde6f44dae517a119e9cff65261753a75b8fdc..f2b08c990afc846ed3901d94cb4cf9b5c1a8d9ed 100644 (file)
@@ -27,8 +27,6 @@
 #define MAP_NONBLOCK   0x20000
 #define MAP_NORESERVE  0x0400
 #define MAP_POPULATE   0x10000
-#define MAP_PRIVATE    0x002
-#define MAP_SHARED     0x001
 #define MAP_STACK      0x40000
 #define PROT_EXEC      0x4
 #define PROT_GROWSDOWN 0x01000000
index e63bce0755ebe605870af4b6b6e9322006f64a3d..8cafb9b314672fe238cb3d4e40b9dafcba1f2301 100644 (file)
@@ -309,6 +309,48 @@ static int btf_dumper_struct(const struct btf_dumper *d, __u32 type_id,
        return ret;
 }
 
+static int btf_dumper_var(const struct btf_dumper *d, __u32 type_id,
+                         __u8 bit_offset, const void *data)
+{
+       const struct btf_type *t = btf__type_by_id(d->btf, type_id);
+       int ret;
+
+       jsonw_start_object(d->jw);
+       jsonw_name(d->jw, btf__name_by_offset(d->btf, t->name_off));
+       ret = btf_dumper_do_type(d, t->type, bit_offset, data);
+       jsonw_end_object(d->jw);
+
+       return ret;
+}
+
+static int btf_dumper_datasec(const struct btf_dumper *d, __u32 type_id,
+                             const void *data)
+{
+       struct btf_var_secinfo *vsi;
+       const struct btf_type *t;
+       int ret = 0, i, vlen;
+
+       t = btf__type_by_id(d->btf, type_id);
+       if (!t)
+               return -EINVAL;
+
+       vlen = BTF_INFO_VLEN(t->info);
+       vsi = (struct btf_var_secinfo *)(t + 1);
+
+       jsonw_start_object(d->jw);
+       jsonw_name(d->jw, btf__name_by_offset(d->btf, t->name_off));
+       jsonw_start_array(d->jw);
+       for (i = 0; i < vlen; i++) {
+               ret = btf_dumper_do_type(d, vsi[i].type, 0, data + vsi[i].offset);
+               if (ret)
+                       break;
+       }
+       jsonw_end_array(d->jw);
+       jsonw_end_object(d->jw);
+
+       return ret;
+}
+
 static int btf_dumper_do_type(const struct btf_dumper *d, __u32 type_id,
                              __u8 bit_offset, const void *data)
 {
@@ -341,6 +383,10 @@ static int btf_dumper_do_type(const struct btf_dumper *d, __u32 type_id,
        case BTF_KIND_CONST:
        case BTF_KIND_RESTRICT:
                return btf_dumper_modifier(d, type_id, bit_offset, data);
+       case BTF_KIND_VAR:
+               return btf_dumper_var(d, type_id, bit_offset, data);
+       case BTF_KIND_DATASEC:
+               return btf_dumper_datasec(d, type_id, data);
        default:
                jsonw_printf(d->jw, "(unsupported-kind)");
                return -EINVAL;
@@ -377,6 +423,7 @@ static int __btf_dumper_type_only(const struct btf *btf, __u32 type_id,
 {
        const struct btf_type *proto_type;
        const struct btf_array *array;
+       const struct btf_var *var;
        const struct btf_type *t;
 
        if (!type_id) {
@@ -440,6 +487,18 @@ static int __btf_dumper_type_only(const struct btf *btf, __u32 type_id,
                if (pos == -1)
                        return -1;
                break;
+       case BTF_KIND_VAR:
+               var = (struct btf_var *)(t + 1);
+               if (var->linkage == BTF_VAR_STATIC)
+                       BTF_PRINT_ARG("static ");
+               BTF_PRINT_TYPE(t->type);
+               BTF_PRINT_ARG(" %s",
+                             btf__name_by_offset(btf, t->name_off));
+               break;
+       case BTF_KIND_DATASEC:
+               BTF_PRINT_ARG("section (\"%s\") ",
+                             btf__name_by_offset(btf, t->name_off));
+               break;
        case BTF_KIND_UNKN:
        default:
                return -1;
index e0c650d91784acac01abe568385244b55a60fd30..e96903078991f33c330fe4431c0b3077a7881479 100644 (file)
@@ -153,11 +153,13 @@ static int do_dump_btf(const struct btf_dumper *d,
        /* start of key-value pair */
        jsonw_start_object(d->jw);
 
-       jsonw_name(d->jw, "key");
+       if (map_info->btf_key_type_id) {
+               jsonw_name(d->jw, "key");
 
-       ret = btf_dumper_type(d, map_info->btf_key_type_id, key);
-       if (ret)
-               goto err_end_obj;
+               ret = btf_dumper_type(d, map_info->btf_key_type_id, key);
+               if (ret)
+                       goto err_end_obj;
+       }
 
        if (!map_is_per_cpu(map_info->type)) {
                jsonw_name(d->jw, "value");
index d2be5a06c339155f355058946f655f38f97b5db9..81067803189ede7d980eb92ed4e23b07f1ff22a2 100644 (file)
@@ -249,6 +249,9 @@ static void print_prog_json(struct bpf_prog_info *info, int fd)
        if (info->nr_map_ids)
                show_prog_maps(fd, info->nr_map_ids);
 
+       if (info->btf_id)
+               jsonw_int_field(json_wtr, "btf_id", info->btf_id);
+
        if (!hash_empty(prog_table.table)) {
                struct pinned_obj *obj;
 
@@ -319,6 +322,9 @@ static void print_prog_plain(struct bpf_prog_info *info, int fd)
                }
        }
 
+       if (info->btf_id)
+               printf("\n\tbtf_id %d\n", info->btf_id);
+
        printf("\n");
 }
 
index 7073dbe1ff27eee87201ead853228d7be1b12028..0bb17bf88b188378cd863e733683a0e9da131c77 100644 (file)
@@ -195,6 +195,9 @@ static const char *print_imm(void *private_data,
        if (insn->src_reg == BPF_PSEUDO_MAP_FD)
                snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
                         "map[id:%u]", insn->imm);
+       else if (insn->src_reg == BPF_PSEUDO_MAP_VALUE)
+               snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+                        "map[id:%u][0]+%u", insn->imm, (insn + 1)->imm);
        else
                snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
                         "0x%llx", (unsigned long long)full_imm);
index d68eb4fb40cc4261e6ad857d7ad656be0f6e95d6..2b0e02c3887076aa674357bbbf7ca7c24e1e7bbf 100644 (file)
@@ -4,9 +4,9 @@
 /*
  * Check OpenCSD library version is sufficient to provide required features
  */
-#define OCSD_MIN_VER ((0 << 16) | (10 << 8) | (0))
+#define OCSD_MIN_VER ((0 << 16) | (11 << 8) | (0))
 #if !defined(OCSD_VER_NUM) || (OCSD_VER_NUM < OCSD_MIN_VER)
-#error "OpenCSD >= 0.10.0 is required"
+#error "OpenCSD >= 0.11.0 is required"
 #endif
 
 int main(void)
index cce0b02c0e286030171c261df2af713ab15ce75f..ca28b6ab8db7c7d315d2e128331d7223c2113825 100644 (file)
                .off   = 0,                                     \
                .imm   = ((__u64) (IMM)) >> 32 })
 
+#define BPF_LD_IMM64_RAW_FULL(DST, SRC, OFF1, OFF2, IMM1, IMM2)        \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_LD | BPF_DW | BPF_IMM,             \
+               .dst_reg = DST,                                 \
+               .src_reg = SRC,                                 \
+               .off   = OFF1,                                  \
+               .imm   = IMM1 }),                               \
+       ((struct bpf_insn) {                                    \
+               .code  = 0, /* zero is reserved opcode */       \
+               .dst_reg = 0,                                   \
+               .src_reg = 0,                                   \
+               .off   = OFF2,                                  \
+               .imm   = IMM2 })
+
 /* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
 
 #define BPF_LD_MAP_FD(DST, MAP_FD)                             \
-       BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)
+       BPF_LD_IMM64_RAW_FULL(DST, BPF_PSEUDO_MAP_FD, 0, 0,     \
+                             MAP_FD, 0)
+
+#define BPF_LD_MAP_VALUE(DST, MAP_FD, VALUE_OFF)               \
+       BPF_LD_IMM64_RAW_FULL(DST, BPF_PSEUDO_MAP_VALUE, 0, 0,  \
+                             MAP_FD, VALUE_OFF)
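
A hedged sketch of how the new BPF_LD_MAP_VALUE macro might be used in a hand-rolled test program; map_fd is assumed to be a valid array map descriptor, and BPF_LDX_MEM()/BPF_EXIT_INSN() come from the same test macro family:

    struct bpf_insn prog[] = {
            /* r1 = pointer to byte 4 of value[0] of map_fd; the verifier
             * types this PTR_TO_MAP_VALUE, so no NULL check is needed.
             */
            BPF_LD_MAP_VALUE(BPF_REG_1, map_fd, 4),
            /* r0 = *(u32 *)(r1 + 0) */
            BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
            BPF_EXIT_INSN(),
    };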
 
 /* Relative call */
 
diff --git a/tools/include/uapi/asm-generic/mman-common-tools.h b/tools/include/uapi/asm-generic/mman-common-tools.h
new file mode 100644 (file)
index 0000000..af7d0d3
--- /dev/null
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __ASM_GENERIC_MMAN_COMMON_TOOLS_ONLY_H
+#define __ASM_GENERIC_MMAN_COMMON_TOOLS_ONLY_H
+
+#include <asm-generic/mman-common.h>
+
+/* We need this because tools/include/uapi/ must be in the tools header search
+ * path to get access to material that is not yet in the system's copy of the
+ * files in that directory. But since this cset:
+ *
+ *     746c9398f5ac ("arch: move common mmap flags to linux/mman.h")
+ *
+ * the system's sys/mman.h no longer finds the MAP_SHARED and MAP_PRIVATE
+ * defines, as they are no longer in our copy of asm-generic/mman-common.h.
+ * So we define them here and include this header from each of the per-arch
+ * mman.h headers.
+ */
+#ifndef MAP_SHARED
+#define MAP_SHARED     0x01            /* Share changes */
+#define MAP_PRIVATE    0x02            /* Changes are private */
+#define MAP_SHARED_VALIDATE 0x03       /* share + validate extension flags */
+#endif
+#endif // __ASM_GENERIC_MMAN_COMMON_TOOLS_ONLY_H
index e7ee32861d51d4b2e47b9182a48c05fe837b8d21..abd238d0f7a48d718728cacde7853c60846bc539 100644 (file)
@@ -15,9 +15,7 @@
 #define PROT_GROWSDOWN 0x01000000      /* mprotect flag: extend change to start of growsdown vma */
 #define PROT_GROWSUP   0x02000000      /* mprotect flag: extend change to end of growsup vma */
 
-#define MAP_SHARED     0x01            /* Share changes */
-#define MAP_PRIVATE    0x02            /* Changes are private */
-#define MAP_SHARED_VALIDATE 0x03       /* share + validate extension flags */
+/* 0x01 - 0x03 are defined in linux/mman.h */
 #define MAP_TYPE       0x0f            /* Mask for type of mapping */
 #define MAP_FIXED      0x10            /* Interpret addr exactly */
 #define MAP_ANONYMOUS  0x20            /* don't use a file */
index 653687d9771b9d0e824fe4b25eee079f7fcec1cc..36c197fc44a0d5df08110715f11cb8b3eb6caff7 100644 (file)
@@ -2,7 +2,7 @@
 #ifndef __ASM_GENERIC_MMAN_H
 #define __ASM_GENERIC_MMAN_H
 
-#include <asm-generic/mman-common.h>
+#include <asm-generic/mman-common-tools.h>
 
 #define MAP_GROWSDOWN  0x0100          /* stack-like segment */
 #define MAP_DENYWRITE  0x0800          /* ETXTBSY */
index 12cdf611d217e1ace70b3d0a19e1e8242c99bc88..dee7292e1df6b162a12d0e55e9ccdf875fad428d 100644 (file)
@@ -824,8 +824,17 @@ __SYSCALL(__NR_futex_time64, sys_futex)
 __SYSCALL(__NR_sched_rr_get_interval_time64, sys_sched_rr_get_interval)
 #endif
 
+#define __NR_pidfd_send_signal 424
+__SYSCALL(__NR_pidfd_send_signal, sys_pidfd_send_signal)
+#define __NR_io_uring_setup 425
+__SYSCALL(__NR_io_uring_setup, sys_io_uring_setup)
+#define __NR_io_uring_enter 426
+__SYSCALL(__NR_io_uring_enter, sys_io_uring_enter)
+#define __NR_io_uring_register 427
+__SYSCALL(__NR_io_uring_register, sys_io_uring_register)
+
 #undef __NR_syscalls
-#define __NR_syscalls 424
+#define __NR_syscalls 428
 
 /*
  * 32 bit systems traditionally used different
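
Until libc grows wrappers, the newly numbered entries can be reached through syscall(2). A minimal sketch, assuming headers that expose io_uring:

    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/io_uring.h>

    static int ring_setup(unsigned int entries)
    {
            struct io_uring_params p;

            memset(&p, 0, sizeof(p));
            /* returns a ring fd on success, -1 with errno set on failure */
            return (int)syscall(__NR_io_uring_setup, entries, &p);
    }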
index 298b2e197744bbc28782d1a853e1ee3577f02bee..397810fa2d33c95f69770bdf3563ea44213b40c6 100644 (file)
@@ -1486,9 +1486,73 @@ struct drm_i915_gem_context_param {
 #define   I915_CONTEXT_MAX_USER_PRIORITY       1023 /* inclusive */
 #define   I915_CONTEXT_DEFAULT_PRIORITY                0
 #define   I915_CONTEXT_MIN_USER_PRIORITY       -1023 /* inclusive */
+       /*
+        * When using the following param, value should be a pointer to
+        * drm_i915_gem_context_param_sseu.
+        */
+#define I915_CONTEXT_PARAM_SSEU                0x7
        __u64 value;
 };
 
+/**
+ * Context SSEU programming
+ *
+ * It may be necessary, for either functional or performance reasons, to
+ * configure a context to run with a reduced number of SSEU (where SSEU
+ * stands for Slice/Sub-slice/EU).
+ *
+ * This is done by supplying an SSEU configuration via the below
+ * @struct drm_i915_gem_context_param_sseu for every supported engine which
+ * userspace intends to use.
+ *
+ * Not all GPUs or engines support this functionality in which case an error
+ * code -ENODEV will be returned.
+ *
+ * Also, the flexibility of possible SSEU configuration permutations varies
+ * between GPU generations and software-imposed limitations. Requesting an
+ * unsupported combination will return an error code of -EINVAL.
+ *
+ * NOTE: When perf/OA is active the context's SSEU configuration is ignored in
+ * favour of a single global setting.
+ */
+struct drm_i915_gem_context_param_sseu {
+       /*
+        * Engine class & instance to be configured or queried.
+        */
+       __u16 engine_class;
+       __u16 engine_instance;
+
+       /*
+        * Unused for now. Must be cleared to zero.
+        */
+       __u32 flags;
+
+       /*
+        * Mask of slices to enable for the context. Valid values are a subset
+        * of the bitmask value returned for I915_PARAM_SLICE_MASK.
+        */
+       __u64 slice_mask;
+
+       /*
+        * Mask of subslices to enable for the context. Valid values are a
+        * subset of the bitmask value return by I915_PARAM_SUBSLICE_MASK.
+        */
+       __u64 subslice_mask;
+
+       /*
+        * Minimum/Maximum number of EUs to enable per subslice for the
+        * context. min_eus_per_subslice must be less than or equal to
+        * max_eus_per_subslice.
+        */
+       __u16 min_eus_per_subslice;
+       __u16 max_eus_per_subslice;
+
+       /*
+        * Unused for now. Must be cleared to zero.
+        */
+       __u32 rsvd;
+};
+
 enum drm_i915_oa_format {
        I915_OA_FORMAT_A13 = 1,     /* HSW only */
        I915_OA_FORMAT_A29,         /* HSW only */
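
A hedged userspace sketch of programming the new param (fd and ctx_id are assumed to come from prior DRM context creation; the mask values are placeholders that must be valid subsets for the hardware):

    #include <stdint.h>
    #include <xf86drm.h>
    #include <drm/i915_drm.h>

    static int set_render_sseu(int fd, uint32_t ctx_id)
    {
            struct drm_i915_gem_context_param_sseu sseu = {
                    .engine_class         = I915_ENGINE_CLASS_RENDER,
                    .engine_instance      = 0,
                    .slice_mask           = 0x1,    /* placeholder subset */
                    .subslice_mask        = 0x7,    /* placeholder subset */
                    .min_eus_per_subslice = 8,
                    .max_eus_per_subslice = 8,
            };
            struct drm_i915_gem_context_param arg = {
                    .ctx_id = ctx_id,
                    .param  = I915_CONTEXT_PARAM_SSEU,
                    .value  = (uintptr_t)&sseu,
            };

            /* -ENODEV: engine/GPU unsupported; -EINVAL: invalid masks */
            return drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);
    }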
index 837024512bafd92c3773282ac5362d826fc93502..2e96d0b4bf65d6fd5f113487e432c66f43e0aa8d 100644 (file)
@@ -105,6 +105,7 @@ enum bpf_cmd {
        BPF_BTF_GET_FD_BY_ID,
        BPF_TASK_FD_QUERY,
        BPF_MAP_LOOKUP_AND_DELETE_ELEM,
+       BPF_MAP_FREEZE,
 };
 
 enum bpf_map_type {
@@ -255,8 +256,19 @@ enum bpf_attach_type {
  */
 #define BPF_F_ANY_ALIGNMENT    (1U << 1)
 
-/* when bpf_ldimm64->src_reg == BPF_PSEUDO_MAP_FD, bpf_ldimm64->imm == fd */
+/* When BPF ldimm64's insn[0].src_reg != 0 then this can have
+ * two extensions:
+ *
+ * insn[0].src_reg:  BPF_PSEUDO_MAP_FD   BPF_PSEUDO_MAP_VALUE
+ * insn[0].imm:      map fd              map fd
+ * insn[1].imm:      0                   offset into value
+ * insn[0].off:      0                   0
+ * insn[1].off:      0                   0
+ * ldimm64 rewrite:  address of map      address of map[0]+offset
+ * verifier type:    CONST_PTR_TO_MAP    PTR_TO_MAP_VALUE
+ */
 #define BPF_PSEUDO_MAP_FD      1
+#define BPF_PSEUDO_MAP_VALUE   2
 
 /* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative
  * offset to another bpf function
@@ -283,7 +295,7 @@ enum bpf_attach_type {
 
 #define BPF_OBJ_NAME_LEN 16U
 
-/* Flags for accessing BPF object */
+/* Flags for accessing BPF object from syscall side. */
 #define BPF_F_RDONLY           (1U << 3)
 #define BPF_F_WRONLY           (1U << 4)
 
@@ -293,6 +305,10 @@ enum bpf_attach_type {
 /* Zero-initialize hash function seed. This should only be used for testing. */
 #define BPF_F_ZERO_SEED                (1U << 6)
 
+/* Flags for accessing BPF object from program side. */
+#define BPF_F_RDONLY_PROG      (1U << 7)
+#define BPF_F_WRONLY_PROG      (1U << 8)
+
 /* flags for BPF_PROG_QUERY */
 #define BPF_F_QUERY_EFFECTIVE  (1U << 0)
 
@@ -396,6 +412,13 @@ union bpf_attr {
                __aligned_u64   data_out;
                __u32           repeat;
                __u32           duration;
+               __u32           ctx_size_in;    /* input: len of ctx_in */
+               __u32           ctx_size_out;   /* input/output: len of ctx_out
+                                                *   returns ENOSPC if ctx_out
+                                                *   is too small.
+                                                */
+               __aligned_u64   ctx_in;
+               __aligned_u64   ctx_out;
        } test;
 
        struct { /* anonymous struct used by BPF_*_GET_*_ID */
@@ -1500,6 +1523,10 @@ union bpf_attr {
  *             * **BPF_F_ADJ_ROOM_ENCAP_L4_UDP **:
  *               Use with ENCAP_L3 flags to further specify the tunnel type.
  *
+ *             * **BPF_F_ADJ_ROOM_ENCAP_L2(len) **:
+ *               Use with ENCAP_L3/L4 flags to further specify the tunnel
+ *               type; **len** is the length of the inner MAC header.
+ *
 *             A call to this helper may change the underlying
  *             packet buffer. Therefore, at load time, all checks on pointers
  *             previously done by the verifier are invalidated and must be
@@ -2641,10 +2668,16 @@ enum bpf_func_id {
 /* BPF_FUNC_skb_adjust_room flags. */
 #define BPF_F_ADJ_ROOM_FIXED_GSO       (1ULL << 0)
 
+#define        BPF_ADJ_ROOM_ENCAP_L2_MASK      0xff
+#define        BPF_ADJ_ROOM_ENCAP_L2_SHIFT     56
+
 #define BPF_F_ADJ_ROOM_ENCAP_L3_IPV4   (1ULL << 1)
 #define BPF_F_ADJ_ROOM_ENCAP_L3_IPV6   (1ULL << 2)
 #define BPF_F_ADJ_ROOM_ENCAP_L4_GRE    (1ULL << 3)
 #define BPF_F_ADJ_ROOM_ENCAP_L4_UDP    (1ULL << 4)
+#define        BPF_F_ADJ_ROOM_ENCAP_L2(len)    (((__u64)len & \
+                                         BPF_ADJ_ROOM_ENCAP_L2_MASK) \
+                                        << BPF_ADJ_ROOM_ENCAP_L2_SHIFT)
 
 /* Mode for BPF_FUNC_skb_adjust_room helper. */
 enum bpf_adj_room_mode {
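
For instance, a program growing room for an IPv4/GRE tunnel header that also carries a 14-byte inner MAC header might combine the flags as below (a sketch, not taken from the patch):

    /* len is masked to 8 bits and shifted into bits 56-63 of the flags */
    __u64 flags = BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 |
                  BPF_F_ADJ_ROOM_ENCAP_L4_GRE |
                  BPF_F_ADJ_ROOM_ENCAP_L2(14 /* ETH_HLEN */);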
index 7b7475ef2f175c9279915aa8d9eb014845533bd9..9310652ca4f96bb56c6f7f561332bcff01511f3a 100644 (file)
@@ -39,11 +39,11 @@ struct btf_type {
         *             struct, union and fwd
         */
        __u32 info;
-       /* "size" is used by INT, ENUM, STRUCT and UNION.
+       /* "size" is used by INT, ENUM, STRUCT, UNION and DATASEC.
         * "size" tells the size of the type it is describing.
         *
         * "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT,
-        * FUNC and FUNC_PROTO.
+        * FUNC, FUNC_PROTO and VAR.
         * "type" is a type_id referring to another type.
         */
        union {
@@ -70,8 +70,10 @@ struct btf_type {
 #define BTF_KIND_RESTRICT      11      /* Restrict     */
 #define BTF_KIND_FUNC          12      /* Function     */
 #define BTF_KIND_FUNC_PROTO    13      /* Function Proto       */
-#define BTF_KIND_MAX           13
-#define NR_BTF_KINDS           14
+#define BTF_KIND_VAR           14      /* Variable     */
+#define BTF_KIND_DATASEC       15      /* Section      */
+#define BTF_KIND_MAX           BTF_KIND_DATASEC
+#define NR_BTF_KINDS           (BTF_KIND_MAX + 1)
 
 /* For some specific BTF_KIND, "struct btf_type" is immediately
  * followed by extra data.
@@ -138,4 +140,26 @@ struct btf_param {
        __u32   type;
 };
 
+enum {
+       BTF_VAR_STATIC = 0,
+       BTF_VAR_GLOBAL_ALLOCATED,
+};
+
+/* BTF_KIND_VAR is followed by a single "struct btf_var" to describe
+ * additional information related to the variable such as its linkage.
+ */
+struct btf_var {
+       __u32   linkage;
+};
+
+/* BTF_KIND_DATASEC is followed by multiple "struct btf_var_secinfo"
+ * to describe all BTF_KIND_VAR types it contains along with its
+ * in-section offset as well as size.
+ */
+struct btf_var_secinfo {
+       __u32   type;
+       __u32   offset;
+       __u32   size;
+};
+
 #endif /* _UAPI__LINUX_BTF_H__ */
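
A hedged sketch of consuming the DATASEC layout described above, using the BTF_INFO_VLEN() accessor from this header:

    #include <stdio.h>
    #include <linux/btf.h>

    /* Walk the btf_var_secinfo records that follow a BTF_KIND_DATASEC type. */
    static void dump_datasec(const struct btf_type *t)
    {
            const struct btf_var_secinfo *vsi =
                    (const struct btf_var_secinfo *)(t + 1);
            unsigned int i, vlen = BTF_INFO_VLEN(t->info);

            for (i = 0; i < vlen; i++, vsi++)
                    printf("var id=%u offset=%u size=%u\n",
                           vsi->type, vsi->offset, vsi->size);
    }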
index 6448cdd9a350d3a0c6513c7a76aad29a68689bd4..a2f8658f1c555235de9fb4581acb113339981bdf 100644 (file)
@@ -41,6 +41,7 @@
 #define F_SEAL_SHRINK  0x0002  /* prevent file from shrinking */
 #define F_SEAL_GROW    0x0004  /* prevent file from growing */
 #define F_SEAL_WRITE   0x0008  /* prevent writes */
+#define F_SEAL_FUTURE_WRITE    0x0010  /* prevent future writes while mapped */
 /* (1U << 31) is reserved for signed error codes */
 
 /*
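
A minimal sketch of the new seal in action (assuming a memfd created with MFD_ALLOW_SEALING; existing writable mappings keep working, but any future write access is refused):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <sys/mman.h>

    static int sealed_memfd(void)
    {
            int fd = memfd_create("buf", MFD_ALLOW_SEALING);

            if (fd < 0)
                    return -1;
            /* deny future write(2) and PROT_WRITE mmap(2) on this memfd */
            if (fcntl(fd, F_ADD_SEALS, F_SEAL_FUTURE_WRITE) < 0)
                    return -1;
            return fd;
    }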
index d0f515d53299ea5784ffdb61dd1b829b04fd045c..fc1a64c3447bf6e329bba5e155053332794a0187 100644 (file)
 #define OVERCOMMIT_ALWAYS              1
 #define OVERCOMMIT_NEVER               2
 
+#define MAP_SHARED     0x01            /* Share changes */
+#define MAP_PRIVATE    0x02            /* Changes are private */
+#define MAP_SHARED_VALIDATE 0x03       /* share + validate extension flags */
+
 /*
  * Huge page size encoding when MAP_HUGETLB is specified, and a huge page
  * size other than the default is desired.  See hugetlb_encode.h.
index 512306a37531d829b880ebbe841aa87c1949b28f..0f257139b003e117eee62b82c87e2f66a20f8e36 100644 (file)
 #include "liburing.h"
 #include "barrier.h"
 
-#ifndef IOCQE_FLAG_CACHEHIT
-#define IOCQE_FLAG_CACHEHIT    (1U << 0)
-#endif
-
 #define min(a, b)              ((a < b) ? (a) : (b))
 
 struct io_sq_ring {
@@ -85,7 +81,6 @@ struct submitter {
        unsigned long reaps;
        unsigned long done;
        unsigned long calls;
-       unsigned long cachehit, cachemiss;
        volatile int finish;
 
        __s32 *fds;
@@ -270,10 +265,6 @@ static int reap_events(struct submitter *s)
                                return -1;
                        }
                }
-               if (cqe->flags & IOCQE_FLAG_CACHEHIT)
-                       s->cachehit++;
-               else
-                       s->cachemiss++;
                reaped++;
                head++;
        } while (1);
@@ -489,7 +480,7 @@ static void file_depths(char *buf)
 int main(int argc, char *argv[])
 {
        struct submitter *s = &submitters[0];
-       unsigned long done, calls, reap, cache_hit, cache_miss;
+       unsigned long done, calls, reap;
        int err, i, flags, fd;
        char *fdepths;
        void *ret;
@@ -569,44 +560,29 @@ int main(int argc, char *argv[])
        pthread_create(&s->thread, NULL, submitter_fn, s);
 
        fdepths = malloc(8 * s->nr_files);
-       cache_hit = cache_miss = reap = calls = done = 0;
+       reap = calls = done = 0;
        do {
                unsigned long this_done = 0;
                unsigned long this_reap = 0;
                unsigned long this_call = 0;
-               unsigned long this_cache_hit = 0;
-               unsigned long this_cache_miss = 0;
                unsigned long rpc = 0, ipc = 0;
-               double hit = 0.0;
 
                sleep(1);
                this_done += s->done;
                this_call += s->calls;
                this_reap += s->reaps;
-               this_cache_hit += s->cachehit;
-               this_cache_miss += s->cachemiss;
-               if (this_cache_hit && this_cache_miss) {
-                       unsigned long hits, total;
-
-                       hits = this_cache_hit - cache_hit;
-                       total = hits + this_cache_miss - cache_miss;
-                       hit = (double) hits / (double) total;
-                       hit *= 100.0;
-               }
                if (this_call - calls) {
                        rpc = (this_done - done) / (this_call - calls);
                        ipc = (this_reap - reap) / (this_call - calls);
                } else
                        rpc = ipc = -1;
                file_depths(fdepths);
-               printf("IOPS=%lu, IOS/call=%ld/%ld, inflight=%u (%s), Cachehit=%0.2f%%\n",
+               printf("IOPS=%lu, IOS/call=%ld/%ld, inflight=%u (%s)\n",
                                this_done - done, rpc, ipc, s->inflight,
-                               fdepths, hit);
+                               fdepths);
                done = this_done;
                calls = this_call;
                reap = this_reap;
-               cache_hit = s->cachehit;
-               cache_miss = s->cachemiss;
        } while (!finish);
 
        pthread_join(s->thread, &ret);
index 4db74758c6743e2a32800eef302f9cdeddc8a2ee..7d9e182a1f51faf0e567a7e02989fcb5a8d8f422 100644 (file)
@@ -1,3 +1,4 @@
 libbpf_version.h
+libbpf.pc
 FEATURE-DUMP.libbpf
 test_libbpf
index 5bf8e52c41fcaf2bb38127d4bb076a9164539ddb..c6c06bc6683cbd783bad40d98abdea1436b5a285 100644 (file)
@@ -3,7 +3,7 @@
 
 BPF_VERSION = 0
 BPF_PATCHLEVEL = 0
-BPF_EXTRAVERSION = 2
+BPF_EXTRAVERSION = 3
 
 MAKEFLAGS += --no-print-directory
 
@@ -90,6 +90,7 @@ LIBBPF_VERSION        = $(BPF_VERSION).$(BPF_PATCHLEVEL).$(BPF_EXTRAVERSION)
 
 LIB_TARGET     = libbpf.a libbpf.so.$(LIBBPF_VERSION)
 LIB_FILE       = libbpf.a libbpf.so*
+PC_FILE                = libbpf.pc
 
 # Set compile option CFLAGS
 ifdef EXTRA_CFLAGS
@@ -134,13 +135,14 @@ VERSION_SCRIPT    := libbpf.map
 
 LIB_TARGET     := $(addprefix $(OUTPUT),$(LIB_TARGET))
 LIB_FILE       := $(addprefix $(OUTPUT),$(LIB_FILE))
+PC_FILE                := $(addprefix $(OUTPUT),$(PC_FILE))
 
 GLOBAL_SYM_COUNT = $(shell readelf -s --wide $(BPF_IN) | \
                           awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {s++} END{print s}')
 VERSIONED_SYM_COUNT = $(shell readelf -s --wide $(OUTPUT)libbpf.so | \
                              grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | sort -u | wc -l)
 
-CMD_TARGETS = $(LIB_TARGET)
+CMD_TARGETS = $(LIB_TARGET) $(PC_FILE)
 
 CXX_TEST_TARGET = $(OUTPUT)test_libbpf
 
@@ -177,7 +179,7 @@ $(OUTPUT)libbpf.so: $(OUTPUT)libbpf.so.$(LIBBPF_VERSION)
 
 $(OUTPUT)libbpf.so.$(LIBBPF_VERSION): $(BPF_IN)
        $(QUIET_LINK)$(CC) --shared -Wl,-soname,libbpf.so.$(VERSION) \
-                                   -Wl,--version-script=$(VERSION_SCRIPT) $^ -o $@
+                                   -Wl,--version-script=$(VERSION_SCRIPT) $^ -lelf -o $@
        @ln -sf $(@F) $(OUTPUT)libbpf.so
        @ln -sf $(@F) $(OUTPUT)libbpf.so.$(VERSION)
 
@@ -187,6 +189,12 @@ $(OUTPUT)libbpf.a: $(BPF_IN)
 $(OUTPUT)test_libbpf: test_libbpf.cpp $(OUTPUT)libbpf.a
        $(QUIET_LINK)$(CXX) $(INCLUDES) $^ -lelf -o $@
 
+$(OUTPUT)libbpf.pc:
+       $(QUIET_GEN)sed -e "s|@PREFIX@|$(prefix)|" \
+               -e "s|@LIBDIR@|$(libdir_SQ)|" \
+               -e "s|@VERSION@|$(LIBBPF_VERSION)|" \
+               < libbpf.pc.template > $@
+
 check: check_abi
 
 check_abi: $(OUTPUT)libbpf.so
@@ -220,10 +228,15 @@ install_lib: all_cmd
 install_headers:
        $(call QUIET_INSTALL, headers) \
                $(call do_install,bpf.h,$(prefix)/include/bpf,644); \
-               $(call do_install,libbpf.h,$(prefix)/include/bpf,644);
-               $(call do_install,btf.h,$(prefix)/include/bpf,644);
+               $(call do_install,libbpf.h,$(prefix)/include/bpf,644); \
+               $(call do_install,btf.h,$(prefix)/include/bpf,644); \
+               $(call do_install,xsk.h,$(prefix)/include/bpf,644);
+
+install_pkgconfig: $(PC_FILE)
+       $(call QUIET_INSTALL, $(PC_FILE)) \
+               $(call do_install,$(PC_FILE),$(libdir_SQ)/pkgconfig,644)
 
-install: install_lib
+install: install_lib install_pkgconfig
 
 ### Cleaning rules
 
@@ -233,7 +246,7 @@ config-clean:
 
 clean:
        $(call QUIET_CLEAN, libbpf) $(RM) $(TARGETS) $(CXX_TEST_TARGET) \
-               *.o *~ *.a *.so *.so.$(VERSION) .*.d .*.cmd LIBBPF-CFLAGS
+               *.o *~ *.a *.so *.so.$(VERSION) .*.d .*.cmd *.pc LIBBPF-CFLAGS
        $(call QUIET_CLEAN, core-gen) $(RM) $(OUTPUT)FEATURE-DUMP.libbpf
 
 
index 9cd015574e83828d20ace576fe7190f68a924829..955191c64b64e4d967c1911db940edaada5a115c 100644 (file)
@@ -79,7 +79,6 @@ static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size)
 
 int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
 {
-       __u32 name_len = create_attr->name ? strlen(create_attr->name) : 0;
        union bpf_attr attr;
 
        memset(&attr, '\0', sizeof(attr));
@@ -89,8 +88,9 @@ int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
        attr.value_size = create_attr->value_size;
        attr.max_entries = create_attr->max_entries;
        attr.map_flags = create_attr->map_flags;
-       memcpy(attr.map_name, create_attr->name,
-              min(name_len, BPF_OBJ_NAME_LEN - 1));
+       if (create_attr->name)
+               memcpy(attr.map_name, create_attr->name,
+                      min(strlen(create_attr->name), BPF_OBJ_NAME_LEN - 1));
        attr.numa_node = create_attr->numa_node;
        attr.btf_fd = create_attr->btf_fd;
        attr.btf_key_type_id = create_attr->btf_key_type_id;
@@ -155,7 +155,6 @@ int bpf_create_map_in_map_node(enum bpf_map_type map_type, const char *name,
                               int key_size, int inner_map_fd, int max_entries,
                               __u32 map_flags, int node)
 {
-       __u32 name_len = name ? strlen(name) : 0;
        union bpf_attr attr;
 
        memset(&attr, '\0', sizeof(attr));
@@ -166,7 +165,9 @@ int bpf_create_map_in_map_node(enum bpf_map_type map_type, const char *name,
        attr.inner_map_fd = inner_map_fd;
        attr.max_entries = max_entries;
        attr.map_flags = map_flags;
-       memcpy(attr.map_name, name, min(name_len, BPF_OBJ_NAME_LEN - 1));
+       if (name)
+               memcpy(attr.map_name, name,
+                      min(strlen(name), BPF_OBJ_NAME_LEN - 1));
 
        if (node >= 0) {
                attr.map_flags |= BPF_F_NUMA_NODE;
@@ -216,18 +217,15 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
        void *finfo = NULL, *linfo = NULL;
        union bpf_attr attr;
        __u32 log_level;
-       __u32 name_len;
        int fd;
 
        if (!load_attr || !log_buf != !log_buf_sz)
                return -EINVAL;
 
        log_level = load_attr->log_level;
-       if (log_level > 2 || (log_level && !log_buf))
+       if (log_level > (4 | 2 | 1) || (log_level && !log_buf))
                return -EINVAL;
 
-       name_len = load_attr->name ? strlen(load_attr->name) : 0;
-
        memset(&attr, 0, sizeof(attr));
        attr.prog_type = load_attr->prog_type;
        attr.expected_attach_type = load_attr->expected_attach_type;
@@ -253,8 +251,9 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
        attr.line_info_rec_size = load_attr->line_info_rec_size;
        attr.line_info_cnt = load_attr->line_info_cnt;
        attr.line_info = ptr_to_u64(load_attr->line_info);
-       memcpy(attr.prog_name, load_attr->name,
-              min(name_len, BPF_OBJ_NAME_LEN - 1));
+       if (load_attr->name)
+               memcpy(attr.prog_name, load_attr->name,
+                      min(strlen(load_attr->name), BPF_OBJ_NAME_LEN - 1));
 
        fd = sys_bpf_prog_load(&attr, sizeof(attr));
        if (fd >= 0)
@@ -429,6 +428,16 @@ int bpf_map_get_next_key(int fd, const void *key, void *next_key)
        return sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
 }
 
+int bpf_map_freeze(int fd)
+{
+       union bpf_attr attr;
+
+       memset(&attr, 0, sizeof(attr));
+       attr.map_fd = fd;
+
+       return sys_bpf(BPF_MAP_FREEZE, &attr, sizeof(attr));
+}
+
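
A hedged sketch of the intended flow: populate the map from the syscall side, then freeze it so that only programs (subject to BPF_F_RDONLY_PROG/BPF_F_WRONLY_PROG) can touch it. Here map_fd is assumed valid and bpf_map_update_elem() is the existing libbpf wrapper:

    static int populate_and_freeze(int map_fd)
    {
            int key = 0, value = 42;

            if (bpf_map_update_elem(map_fd, &key, &value, BPF_ANY))
                    return -1;
            /* further syscall-side updates now fail with -EPERM */
            return bpf_map_freeze(map_fd);
    }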
 int bpf_obj_pin(int fd, const char *pathname)
 {
        union bpf_attr attr;
@@ -545,10 +554,15 @@ int bpf_prog_test_run_xattr(struct bpf_prog_test_run_attr *test_attr)
        attr.test.data_out = ptr_to_u64(test_attr->data_out);
        attr.test.data_size_in = test_attr->data_size_in;
        attr.test.data_size_out = test_attr->data_size_out;
+       attr.test.ctx_in = ptr_to_u64(test_attr->ctx_in);
+       attr.test.ctx_out = ptr_to_u64(test_attr->ctx_out);
+       attr.test.ctx_size_in = test_attr->ctx_size_in;
+       attr.test.ctx_size_out = test_attr->ctx_size_out;
        attr.test.repeat = test_attr->repeat;
 
        ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));
        test_attr->data_size_out = attr.test.data_size_out;
+       test_attr->ctx_size_out = attr.test.ctx_size_out;
        test_attr->retval = attr.test.retval;
        test_attr->duration = attr.test.duration;
        return ret;
index 6ffdd79bea89df1e8e2d015130f15a93e504ff94..bc30783d14033a64801589ade7716bea8840311b 100644 (file)
@@ -92,7 +92,7 @@ struct bpf_load_program_attr {
 #define MAPS_RELAX_COMPAT      0x01
 
 /* Recommend log buffer size */
-#define BPF_LOG_BUF_SIZE (256 * 1024)
+#define BPF_LOG_BUF_SIZE (16 * 1024 * 1024) /* verifier maximum in kernels <= 5.1 */
 LIBBPF_API int
 bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
                       char *log_buf, size_t log_buf_sz);
@@ -117,6 +117,7 @@ LIBBPF_API int bpf_map_lookup_and_delete_elem(int fd, const void *key,
                                              void *value);
 LIBBPF_API int bpf_map_delete_elem(int fd, const void *key);
 LIBBPF_API int bpf_map_get_next_key(int fd, const void *key, void *next_key);
+LIBBPF_API int bpf_map_freeze(int fd);
 LIBBPF_API int bpf_obj_pin(int fd, const char *pathname);
 LIBBPF_API int bpf_obj_get(const char *pathname);
 LIBBPF_API int bpf_prog_attach(int prog_fd, int attachable_fd,
@@ -135,6 +136,11 @@ struct bpf_prog_test_run_attr {
                              * out: length of data_out */
        __u32 retval;        /* out: return code of the BPF program */
        __u32 duration;      /* out: average per repetition in ns */
+       const void *ctx_in; /* optional */
+       __u32 ctx_size_in;
+       void *ctx_out;      /* optional */
+       __u32 ctx_size_out; /* in: max length of ctx_out
+                            * out: length of ctx_out */
 };
 
 LIBBPF_API int bpf_prog_test_run_xattr(struct bpf_prog_test_run_attr *test_attr);
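
A hedged sketch of driving the new context fields; prog_fd is assumed to be a loaded skb-type program and struct __sk_buff comes from linux/bpf.h:

    static int run_with_ctx(int prog_fd)
    {
            char pkt[64] = {};
            struct __sk_buff ctx_in = { .cb = { 1, 2, 3, 4, 5 } };
            struct __sk_buff ctx_out = {};
            struct bpf_prog_test_run_attr tattr = {
                    .prog_fd      = prog_fd,
                    .repeat       = 1,
                    .data_in      = pkt,
                    .data_size_in = sizeof(pkt),
                    .ctx_in       = &ctx_in,
                    .ctx_size_in  = sizeof(ctx_in),
                    .ctx_out      = &ctx_out,
                    .ctx_size_out = sizeof(ctx_out),
            };

            /* on success, ctx_out holds the context as the program left it */
            return bpf_prog_test_run_xattr(&tattr);
    }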
index 87e3020ac1bc8b3772d98ce58751fc2d6f979184..701e7e28ada3830bd1e4b7e0cdf3456c10e9b257 100644 (file)
@@ -24,6 +24,8 @@
                ((k) == BTF_KIND_CONST) || \
                ((k) == BTF_KIND_RESTRICT))
 
+#define IS_VAR(k) ((k) == BTF_KIND_VAR)
+
 static struct btf_type btf_void;
 
 struct btf {
@@ -212,6 +214,10 @@ static int btf_type_size(struct btf_type *t)
                return base_size + vlen * sizeof(struct btf_member);
        case BTF_KIND_FUNC_PROTO:
                return base_size + vlen * sizeof(struct btf_param);
+       case BTF_KIND_VAR:
+               return base_size + sizeof(struct btf_var);
+       case BTF_KIND_DATASEC:
+               return base_size + vlen * sizeof(struct btf_var_secinfo);
        default:
                pr_debug("Unsupported BTF_KIND:%u\n", BTF_INFO_KIND(t->info));
                return -EINVAL;
@@ -283,6 +289,7 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
                case BTF_KIND_STRUCT:
                case BTF_KIND_UNION:
                case BTF_KIND_ENUM:
+               case BTF_KIND_DATASEC:
                        size = t->size;
                        goto done;
                case BTF_KIND_PTR:
@@ -292,6 +299,7 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
                case BTF_KIND_VOLATILE:
                case BTF_KIND_CONST:
                case BTF_KIND_RESTRICT:
+               case BTF_KIND_VAR:
                        type_id = t->type;
                        break;
                case BTF_KIND_ARRAY:
@@ -326,7 +334,8 @@ int btf__resolve_type(const struct btf *btf, __u32 type_id)
        t = btf__type_by_id(btf, type_id);
        while (depth < MAX_RESOLVE_DEPTH &&
               !btf_type_is_void_or_null(t) &&
-              IS_MODIFIER(BTF_INFO_KIND(t->info))) {
+              (IS_MODIFIER(BTF_INFO_KIND(t->info)) ||
+               IS_VAR(BTF_INFO_KIND(t->info)))) {
                type_id = t->type;
                t = btf__type_by_id(btf, type_id);
                depth++;
@@ -408,6 +417,92 @@ struct btf *btf__new(__u8 *data, __u32 size)
        return btf;
 }
 
+static int compare_vsi_off(const void *_a, const void *_b)
+{
+       const struct btf_var_secinfo *a = _a;
+       const struct btf_var_secinfo *b = _b;
+
+       return a->offset - b->offset;
+}
+
+static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
+                            struct btf_type *t)
+{
+       __u32 size = 0, off = 0, i, vars = BTF_INFO_VLEN(t->info);
+       const char *name = btf__name_by_offset(btf, t->name_off);
+       const struct btf_type *t_var;
+       struct btf_var_secinfo *vsi;
+       struct btf_var *var;
+       int ret;
+
+       if (!name) {
+               pr_debug("No name found in string section for DATASEC kind.\n");
+               return -ENOENT;
+       }
+
+       ret = bpf_object__section_size(obj, name, &size);
+       if (ret || !size || (t->size && t->size != size)) {
+               pr_debug("Invalid size for section %s: %u bytes\n", name, size);
+               return -ENOENT;
+       }
+
+       t->size = size;
+
+       for (i = 0, vsi = (struct btf_var_secinfo *)(t + 1);
+            i < vars; i++, vsi++) {
+               t_var = btf__type_by_id(btf, vsi->type);
+               var = (struct btf_var *)(t_var + 1);
+
+               if (BTF_INFO_KIND(t_var->info) != BTF_KIND_VAR) {
+                       pr_debug("Non-VAR type seen in section %s\n", name);
+                       return -EINVAL;
+               }
+
+               if (var->linkage == BTF_VAR_STATIC)
+                       continue;
+
+               name = btf__name_by_offset(btf, t_var->name_off);
+               if (!name) {
+                       pr_debug("No name found in string section for VAR kind\n");
+                       return -ENOENT;
+               }
+
+               ret = bpf_object__variable_offset(obj, name, &off);
+               if (ret) {
+                       pr_debug("No offset found in symbol table for VAR %s\n", name);
+                       return -ENOENT;
+               }
+
+               vsi->offset = off;
+       }
+
+       qsort(t + 1, vars, sizeof(*vsi), compare_vsi_off);
+       return 0;
+}
+
+int btf__finalize_data(struct bpf_object *obj, struct btf *btf)
+{
+       int err = 0;
+       __u32 i;
+
+       for (i = 1; i <= btf->nr_types; i++) {
+               struct btf_type *t = btf->types[i];
+
+               /* Loader needs to fix up some of the things compiler
+                * couldn't get its hands on while emitting BTF. This
+                * is section size and global variable offset. We use
+                * the info from the ELF itself for this purpose.
+                */
+               if (BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC) {
+                       err = btf_fixup_datasec(obj, btf, t);
+                       if (err)
+                               break;
+               }
+       }
+
+       return err;
+}
+
 int btf__load(struct btf *btf)
 {
        __u32 log_buf_size = BPF_LOG_BUF_SIZE;
@@ -2107,6 +2202,9 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
                return fwd_kind == real_kind;
        }
 
+       if (cand_kind != canon_kind)
+               return 0;
+
        switch (cand_kind) {
        case BTF_KIND_INT:
                return btf_equal_int(cand_type, canon_type);
index 28a1e1e59861927dc31783b74ee11be302667f32..c7b399e81fcefae69ac07e48dd103811924d1e29 100644 (file)
@@ -21,6 +21,8 @@ struct btf;
 struct btf_ext;
 struct btf_type;
 
+struct bpf_object;
+
 /*
  * The .BTF.ext ELF section layout defined as
  *   struct btf_ext_header
@@ -57,6 +59,7 @@ struct btf_ext_header {
 
 LIBBPF_API void btf__free(struct btf *btf);
 LIBBPF_API struct btf *btf__new(__u8 *data, __u32 size);
+LIBBPF_API int btf__finalize_data(struct bpf_object *obj, struct btf *btf);
 LIBBPF_API int btf__load(struct btf *btf);
 LIBBPF_API __s32 btf__find_by_name(const struct btf *btf,
                                   const char *type_name);
index 11c25d9ea43124fc6e67fab5a7dc8b93f5dc9e4a..67484cf32b2e59dd6fd5c661c899ef86857f48b2 100644 (file)
@@ -7,6 +7,7 @@
  * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
  * Copyright (C) 2015 Huawei Inc.
  * Copyright (C) 2017 Nicira, Inc.
+ * Copyright (C) 2019 Isovalent, Inc.
  */
 
 #ifndef _GNU_SOURCE
 #define BPF_FS_MAGIC           0xcafe4a11
 #endif
 
+/* vsprintf() in __base_pr() uses nonliteral format string. It may break
+ * compilation if user enables corresponding warning. Disable it explicitly.
+ */
+#pragma GCC diagnostic ignored "-Wformat-nonliteral"
+
 #define __printf(a, b) __attribute__((format(printf, a, b)))
 
 static int __base_pr(enum libbpf_print_level level, const char *format,
@@ -144,6 +150,7 @@ struct bpf_program {
                enum {
                        RELO_LD64,
                        RELO_CALL,
+                       RELO_DATA,
                } type;
                int insn_idx;
                union {
@@ -152,6 +159,7 @@ struct bpf_program {
                };
        } *reloc_desc;
        int nr_reloc;
+       int log_level;
 
        struct {
                int nr;
@@ -176,6 +184,19 @@ struct bpf_program {
        __u32 line_info_cnt;
 };
 
+enum libbpf_map_type {
+       LIBBPF_MAP_UNSPEC,
+       LIBBPF_MAP_DATA,
+       LIBBPF_MAP_BSS,
+       LIBBPF_MAP_RODATA,
+};
+
+static const char * const libbpf_type_to_btf_name[] = {
+       [LIBBPF_MAP_DATA]       = ".data",
+       [LIBBPF_MAP_BSS]        = ".bss",
+       [LIBBPF_MAP_RODATA]     = ".rodata",
+};
+
 struct bpf_map {
        int fd;
        char *name;
@@ -187,11 +208,18 @@ struct bpf_map {
        __u32 btf_value_type_id;
        void *priv;
        bpf_map_clear_priv_t clear_priv;
+       enum libbpf_map_type libbpf_type;
+};
+
+struct bpf_secdata {
+       void *rodata;
+       void *data;
 };
 
 static LIST_HEAD(bpf_objects_list);
 
 struct bpf_object {
+       char name[BPF_OBJ_NAME_LEN];
        char license[64];
        __u32 kern_version;
 
@@ -199,6 +227,7 @@ struct bpf_object {
        size_t nr_programs;
        struct bpf_map *maps;
        size_t nr_maps;
+       struct bpf_secdata sections;
 
        bool loaded;
        bool has_pseudo_calls;
@@ -214,6 +243,9 @@ struct bpf_object {
                Elf *elf;
                GElf_Ehdr ehdr;
                Elf_Data *symbols;
+               Elf_Data *data;
+               Elf_Data *rodata;
+               Elf_Data *bss;
                size_t strtabidx;
                struct {
                        GElf_Shdr shdr;
@@ -222,6 +254,9 @@ struct bpf_object {
                int nr_reloc;
                int maps_shndx;
                int text_shndx;
+               int data_shndx;
+               int rodata_shndx;
+               int bss_shndx;
        } efile;
        /*
         * All loaded bpf_object is linked in a list, which is
@@ -443,6 +478,7 @@ static struct bpf_object *bpf_object__new(const char *path,
                                          size_t obj_buf_sz)
 {
        struct bpf_object *obj;
+       char *end;
 
        obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
        if (!obj) {
@@ -451,8 +487,14 @@ static struct bpf_object *bpf_object__new(const char *path,
        }
 
        strcpy(obj->path, path);
-       obj->efile.fd = -1;
+       /* Using basename() GNU version which doesn't modify arg. */
+       strncpy(obj->name, basename((void *)path),
+               sizeof(obj->name) - 1);
+       end = strchr(obj->name, '.');
+       if (end)
+               *end = 0;
 
+       obj->efile.fd = -1;
        /*
         * Caller of this function should also calls
         * bpf_object__elf_finish() after data collection to return
@@ -462,6 +504,9 @@ static struct bpf_object *bpf_object__new(const char *path,
        obj->efile.obj_buf = obj_buf;
        obj->efile.obj_buf_sz = obj_buf_sz;
        obj->efile.maps_shndx = -1;
+       obj->efile.data_shndx = -1;
+       obj->efile.rodata_shndx = -1;
+       obj->efile.bss_shndx = -1;
 
        obj->loaded = false;
 
@@ -480,6 +525,9 @@ static void bpf_object__elf_finish(struct bpf_object *obj)
                obj->efile.elf = NULL;
        }
        obj->efile.symbols = NULL;
+       obj->efile.data = NULL;
+       obj->efile.rodata = NULL;
+       obj->efile.bss = NULL;
 
        zfree(&obj->efile.reloc);
        obj->efile.nr_reloc = 0;
@@ -621,27 +669,182 @@ static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
        return false;
 }
 
+static int bpf_object_search_section_size(const struct bpf_object *obj,
+                                         const char *name, size_t *d_size)
+{
+       const GElf_Ehdr *ep = &obj->efile.ehdr;
+       Elf *elf = obj->efile.elf;
+       Elf_Scn *scn = NULL;
+       int idx = 0;
+
+       while ((scn = elf_nextscn(elf, scn)) != NULL) {
+               const char *sec_name;
+               Elf_Data *data;
+               GElf_Shdr sh;
+
+               idx++;
+               if (gelf_getshdr(scn, &sh) != &sh) {
+                       pr_warning("failed to get section(%d) header from %s\n",
+                                  idx, obj->path);
+                       return -EIO;
+               }
+
+               sec_name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
+               if (!sec_name) {
+                       pr_warning("failed to get section(%d) name from %s\n",
+                                  idx, obj->path);
+                       return -EIO;
+               }
+
+               if (strcmp(name, sec_name))
+                       continue;
+
+               data = elf_getdata(scn, 0);
+               if (!data) {
+                       pr_warning("failed to get section(%d) data from %s(%s)\n",
+                                  idx, name, obj->path);
+                       return -EIO;
+               }
+
+               *d_size = data->d_size;
+               return 0;
+       }
+
+       return -ENOENT;
+}
+
+int bpf_object__section_size(const struct bpf_object *obj, const char *name,
+                            __u32 *size)
+{
+       int ret = -ENOENT;
+       size_t d_size;
+
+       *size = 0;
+       if (!name) {
+               return -EINVAL;
+       } else if (!strcmp(name, ".data")) {
+               if (obj->efile.data)
+                       *size = obj->efile.data->d_size;
+       } else if (!strcmp(name, ".bss")) {
+               if (obj->efile.bss)
+                       *size = obj->efile.bss->d_size;
+       } else if (!strcmp(name, ".rodata")) {
+               if (obj->efile.rodata)
+                       *size = obj->efile.rodata->d_size;
+       } else {
+               ret = bpf_object_search_section_size(obj, name, &d_size);
+               if (!ret)
+                       *size = d_size;
+       }
+
+       return *size ? 0 : ret;
+}
+
+int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
+                               __u32 *off)
+{
+       Elf_Data *symbols = obj->efile.symbols;
+       const char *sname;
+       size_t si;
+
+       if (!name || !off)
+               return -EINVAL;
+
+       for (si = 0; si < symbols->d_size / sizeof(GElf_Sym); si++) {
+               GElf_Sym sym;
+
+               if (!gelf_getsym(symbols, si, &sym))
+                       continue;
+               if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
+                   GELF_ST_TYPE(sym.st_info) != STT_OBJECT)
+                       continue;
+
+               sname = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
+                                  sym.st_name);
+               if (!sname) {
+                       pr_warning("failed to get sym name string for var %s\n",
+                                  name);
+                       return -EIO;
+               }
+               if (strcmp(name, sname) == 0) {
+                       *off = sym.st_value;
+                       return 0;
+               }
+       }
+
+       return -ENOENT;
+}
+
+static bool bpf_object__has_maps(const struct bpf_object *obj)
+{
+       return obj->efile.maps_shndx >= 0 ||
+              obj->efile.data_shndx >= 0 ||
+              obj->efile.rodata_shndx >= 0 ||
+              obj->efile.bss_shndx >= 0;
+}
+
+static int
+bpf_object__init_internal_map(struct bpf_object *obj, struct bpf_map *map,
+                             enum libbpf_map_type type, Elf_Data *data,
+                             void **data_buff)
+{
+       struct bpf_map_def *def = &map->def;
+       char map_name[BPF_OBJ_NAME_LEN];
+
+       map->libbpf_type = type;
+       map->offset = ~(typeof(map->offset))0;
+       snprintf(map_name, sizeof(map_name), "%.8s%.7s", obj->name,
+                libbpf_type_to_btf_name[type]);
+       map->name = strdup(map_name);
+       if (!map->name) {
+               pr_warning("failed to alloc map name\n");
+               return -ENOMEM;
+       }
+
+       def->type = BPF_MAP_TYPE_ARRAY;
+       def->key_size = sizeof(int);
+       def->value_size = data->d_size;
+       def->max_entries = 1;
+       def->map_flags = type == LIBBPF_MAP_RODATA ?
+                        BPF_F_RDONLY_PROG : 0;
+       if (data_buff) {
+               *data_buff = malloc(data->d_size);
+               if (!*data_buff) {
+                       zfree(&map->name);
+                       pr_warning("failed to alloc map content buffer\n");
+                       return -ENOMEM;
+               }
+               memcpy(*data_buff, data->d_buf, data->d_size);
+       }
+
+       pr_debug("map %ld is \"%s\"\n", map - obj->maps, map->name);
+       return 0;
+}
+
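For orientation, a hedged sketch of the BPF-side C this helper serves: each of the sections below becomes one single-entry BPF_MAP_TYPE_ARRAY whose value covers the whole section (static, because the relocation handling later in this patch rejects non-static globals):

    static __u32 hit_count = 1;        /* .data   -> LIBBPF_MAP_DATA   */
    static const char tag[] = "v1";    /* .rodata -> LIBBPF_MAP_RODATA */
    static __u64 scratch;              /* .bss    -> LIBBPF_MAP_BSS    */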
 static int
 bpf_object__init_maps(struct bpf_object *obj, int flags)
 {
+       int i, map_idx, map_def_sz = 0, nr_syms, nr_maps = 0, nr_maps_glob = 0;
        bool strict = !(flags & MAPS_RELAX_COMPAT);
-       int i, map_idx, map_def_sz, nr_maps = 0;
-       Elf_Scn *scn;
-       Elf_Data *data = NULL;
        Elf_Data *symbols = obj->efile.symbols;
+       Elf_Data *data = NULL;
+       int ret = 0;
 
-       if (obj->efile.maps_shndx < 0)
-               return -EINVAL;
        if (!symbols)
                return -EINVAL;
+       nr_syms = symbols->d_size / sizeof(GElf_Sym);
 
-       scn = elf_getscn(obj->efile.elf, obj->efile.maps_shndx);
-       if (scn)
-               data = elf_getdata(scn, NULL);
-       if (!scn || !data) {
-               pr_warning("failed to get Elf_Data from map section %d\n",
-                          obj->efile.maps_shndx);
-               return -EINVAL;
+       if (obj->efile.maps_shndx >= 0) {
+               Elf_Scn *scn = elf_getscn(obj->efile.elf,
+                                         obj->efile.maps_shndx);
+
+               if (scn)
+                       data = elf_getdata(scn, NULL);
+               if (!scn || !data) {
+                       pr_warning("failed to get Elf_Data from map section %d\n",
+                                  obj->efile.maps_shndx);
+                       return -EINVAL;
+               }
        }
 
        /*
@@ -651,7 +854,13 @@ bpf_object__init_maps(struct bpf_object *obj, int flags)
         *
         * TODO: Detect array of map and report error.
         */
-       for (i = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
+       if (obj->efile.data_shndx >= 0)
+               nr_maps_glob++;
+       if (obj->efile.rodata_shndx >= 0)
+               nr_maps_glob++;
+       if (obj->efile.bss_shndx >= 0)
+               nr_maps_glob++;
+       for (i = 0; data && i < nr_syms; i++) {
                GElf_Sym sym;
 
                if (!gelf_getsym(symbols, i, &sym))
@@ -664,19 +873,21 @@ bpf_object__init_maps(struct bpf_object *obj, int flags)
        /* Alloc obj->maps and fill nr_maps. */
        pr_debug("maps in %s: %d maps in %zd bytes\n", obj->path,
                 nr_maps, data->d_size);
-
-       if (!nr_maps)
+       if (!nr_maps && !nr_maps_glob)
                return 0;
 
        /* Assume equally sized map definitions */
-       map_def_sz = data->d_size / nr_maps;
-       if (!data->d_size || (data->d_size % nr_maps) != 0) {
-               pr_warning("unable to determine map definition size "
-                          "section %s, %d maps in %zd bytes\n",
-                          obj->path, nr_maps, data->d_size);
-               return -EINVAL;
+       if (data) {
+               map_def_sz = data->d_size / nr_maps;
+               if (!data->d_size || (data->d_size % nr_maps) != 0) {
+                       pr_warning("unable to determine map definition size "
+                                  "section %s, %d maps in %zd bytes\n",
+                                  obj->path, nr_maps, data->d_size);
+                       return -EINVAL;
+               }
        }
 
+       nr_maps += nr_maps_glob;
        obj->maps = calloc(nr_maps, sizeof(obj->maps[0]));
        if (!obj->maps) {
                pr_warning("alloc maps for object failed\n");
@@ -697,7 +908,7 @@ bpf_object__init_maps(struct bpf_object *obj, int flags)
        /*
         * Fill obj->maps using data in "maps" section.
         */
-       for (i = 0, map_idx = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
+       for (i = 0, map_idx = 0; data && i < nr_syms; i++) {
                GElf_Sym sym;
                const char *map_name;
                struct bpf_map_def *def;
@@ -710,6 +921,8 @@ bpf_object__init_maps(struct bpf_object *obj, int flags)
                map_name = elf_strptr(obj->efile.elf,
                                      obj->efile.strtabidx,
                                      sym.st_name);
+
+               obj->maps[map_idx].libbpf_type = LIBBPF_MAP_UNSPEC;
                obj->maps[map_idx].offset = sym.st_value;
                if (sym.st_value + map_def_sz > data->d_size) {
                        pr_warning("corrupted maps section in %s: last map \"%s\" too small\n",
@@ -758,8 +971,27 @@ bpf_object__init_maps(struct bpf_object *obj, int flags)
                map_idx++;
        }
 
-       qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]), compare_bpf_map);
-       return 0;
+       /*
+        * Populate the rest of obj->maps with libbpf internal maps.
+        */
+       if (obj->efile.data_shndx >= 0)
+               ret = bpf_object__init_internal_map(obj, &obj->maps[map_idx++],
+                                                   LIBBPF_MAP_DATA,
+                                                   obj->efile.data,
+                                                   &obj->sections.data);
+       if (!ret && obj->efile.rodata_shndx >= 0)
+               ret = bpf_object__init_internal_map(obj, &obj->maps[map_idx++],
+                                                   LIBBPF_MAP_RODATA,
+                                                   obj->efile.rodata,
+                                                   &obj->sections.rodata);
+       if (!ret && obj->efile.bss_shndx >= 0)
+               ret = bpf_object__init_internal_map(obj, &obj->maps[map_idx++],
+                                                   LIBBPF_MAP_BSS,
+                                                   obj->efile.bss, NULL);
+       if (!ret)
+               qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]),
+                     compare_bpf_map);
+       return ret;
 }
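The "equally sized map definitions" check above holds because every legacy map in the maps section is declared with the same struct; a sketch, assuming the usual bpf_helpers.h definitions of SEC() and struct bpf_map_def:

    struct bpf_map_def SEC("maps") counters = {
            .type        = BPF_MAP_TYPE_ARRAY,
            .key_size    = sizeof(__u32),
            .value_size  = sizeof(__u64),
            .max_entries = 256,
    };  /* data->d_size is then an exact multiple of sizeof(struct bpf_map_def) */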
 
 static bool section_have_execinstr(struct bpf_object *obj, int idx)
@@ -785,6 +1017,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
        Elf *elf = obj->efile.elf;
        GElf_Ehdr *ep = &obj->efile.ehdr;
        Elf_Data *btf_ext_data = NULL;
+       Elf_Data *btf_data = NULL;
        Elf_Scn *scn = NULL;
        int idx = 0, err = 0;
 
@@ -828,32 +1061,18 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
                         (int)sh.sh_link, (unsigned long)sh.sh_flags,
                         (int)sh.sh_type);
 
-               if (strcmp(name, "license") == 0)
+               if (strcmp(name, "license") == 0) {
                        err = bpf_object__init_license(obj,
                                                       data->d_buf,
                                                       data->d_size);
-               else if (strcmp(name, "version") == 0)
+               } else if (strcmp(name, "version") == 0) {
                        err = bpf_object__init_kversion(obj,
                                                        data->d_buf,
                                                        data->d_size);
-               else if (strcmp(name, "maps") == 0)
+               } else if (strcmp(name, "maps") == 0) {
                        obj->efile.maps_shndx = idx;
-               else if (strcmp(name, BTF_ELF_SEC) == 0) {
-                       obj->btf = btf__new(data->d_buf, data->d_size);
-                       if (IS_ERR(obj->btf)) {
-                               pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
-                                          BTF_ELF_SEC, PTR_ERR(obj->btf));
-                               obj->btf = NULL;
-                               continue;
-                       }
-                       err = btf__load(obj->btf);
-                       if (err) {
-                               pr_warning("Error loading %s into kernel: %d. Ignored and continue.\n",
-                                          BTF_ELF_SEC, err);
-                               btf__free(obj->btf);
-                               obj->btf = NULL;
-                               err = 0;
-                       }
+               } else if (strcmp(name, BTF_ELF_SEC) == 0) {
+                       btf_data = data;
                } else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
                        btf_ext_data = data;
                } else if (sh.sh_type == SHT_SYMTAB) {
@@ -865,20 +1084,28 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
                                obj->efile.symbols = data;
                                obj->efile.strtabidx = sh.sh_link;
                        }
-               } else if ((sh.sh_type == SHT_PROGBITS) &&
-                          (sh.sh_flags & SHF_EXECINSTR) &&
-                          (data->d_size > 0)) {
-                       if (strcmp(name, ".text") == 0)
-                               obj->efile.text_shndx = idx;
-                       err = bpf_object__add_program(obj, data->d_buf,
-                                                     data->d_size, name, idx);
-                       if (err) {
-                               char errmsg[STRERR_BUFSIZE];
-                               char *cp = libbpf_strerror_r(-err, errmsg,
-                                                            sizeof(errmsg));
-
-                               pr_warning("failed to alloc program %s (%s): %s",
-                                          name, obj->path, cp);
+               } else if (sh.sh_type == SHT_PROGBITS && data->d_size > 0) {
+                       if (sh.sh_flags & SHF_EXECINSTR) {
+                               if (strcmp(name, ".text") == 0)
+                                       obj->efile.text_shndx = idx;
+                               err = bpf_object__add_program(obj, data->d_buf,
+                                                             data->d_size, name, idx);
+                               if (err) {
+                                       char errmsg[STRERR_BUFSIZE];
+                                       char *cp = libbpf_strerror_r(-err, errmsg,
+                                                                    sizeof(errmsg));
+
+                                       pr_warning("failed to alloc program %s (%s): %s",
+                                                  name, obj->path, cp);
+                               }
+                       } else if (strcmp(name, ".data") == 0) {
+                               obj->efile.data = data;
+                               obj->efile.data_shndx = idx;
+                       } else if (strcmp(name, ".rodata") == 0) {
+                               obj->efile.rodata = data;
+                               obj->efile.rodata_shndx = idx;
+                       } else {
+                               pr_debug("skip section(%d) %s\n", idx, name);
                        }
                } else if (sh.sh_type == SHT_REL) {
                        void *reloc = obj->efile.reloc;
@@ -906,6 +1133,9 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
                                obj->efile.reloc[n].shdr = sh;
                                obj->efile.reloc[n].data = data;
                        }
+               } else if (sh.sh_type == SHT_NOBITS && strcmp(name, ".bss") == 0) {
+                       obj->efile.bss = data;
+                       obj->efile.bss_shndx = idx;
                } else {
                        pr_debug("skip section(%d) %s\n", idx, name);
                }
@@ -917,6 +1147,25 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
                pr_warning("Corrupted ELF file: index of strtab invalid\n");
                return LIBBPF_ERRNO__FORMAT;
        }
+       if (btf_data) {
+               obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
+               if (IS_ERR(obj->btf)) {
+                       pr_warning("Error loading ELF section %s: %ld. Ignoring and continuing.\n",
+                                  BTF_ELF_SEC, PTR_ERR(obj->btf));
+                       obj->btf = NULL;
+               } else {
+                       err = btf__finalize_data(obj, obj->btf);
+                       if (!err)
+                               err = btf__load(obj->btf);
+                       if (err) {
+                               pr_warning("Error finalizing and loading %s into kernel: %d. Ignoring and continuing.\n",
+                                          BTF_ELF_SEC, err);
+                               btf__free(obj->btf);
+                               obj->btf = NULL;
+                               err = 0;
+                       }
+               }
+       }
        if (btf_ext_data) {
                if (!obj->btf) {
                        pr_debug("Ignore ELF section %s because its depending ELF section %s is not found.\n",
@@ -932,7 +1181,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
                        }
                }
        }
-       if (obj->efile.maps_shndx >= 0) {
+       if (bpf_object__has_maps(obj)) {
                err = bpf_object__init_maps(obj, flags);
                if (err)
                        goto out;
@@ -968,13 +1217,46 @@ bpf_object__find_program_by_title(struct bpf_object *obj, const char *title)
        return NULL;
 }
 
+static bool bpf_object__shndx_is_data(const struct bpf_object *obj,
+                                     int shndx)
+{
+       return shndx == obj->efile.data_shndx ||
+              shndx == obj->efile.bss_shndx ||
+              shndx == obj->efile.rodata_shndx;
+}
+
+static bool bpf_object__shndx_is_maps(const struct bpf_object *obj,
+                                     int shndx)
+{
+       return shndx == obj->efile.maps_shndx;
+}
+
+static bool bpf_object__relo_in_known_section(const struct bpf_object *obj,
+                                             int shndx)
+{
+       return shndx == obj->efile.text_shndx ||
+              bpf_object__shndx_is_maps(obj, shndx) ||
+              bpf_object__shndx_is_data(obj, shndx);
+}
+
+static enum libbpf_map_type
+bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
+{
+       if (shndx == obj->efile.data_shndx)
+               return LIBBPF_MAP_DATA;
+       else if (shndx == obj->efile.bss_shndx)
+               return LIBBPF_MAP_BSS;
+       else if (shndx == obj->efile.rodata_shndx)
+               return LIBBPF_MAP_RODATA;
+       else
+               return LIBBPF_MAP_UNSPEC;
+}
+
 static int
 bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
                           Elf_Data *data, struct bpf_object *obj)
 {
        Elf_Data *symbols = obj->efile.symbols;
-       int text_shndx = obj->efile.text_shndx;
-       int maps_shndx = obj->efile.maps_shndx;
        struct bpf_map *maps = obj->maps;
        size_t nr_maps = obj->nr_maps;
        int i, nrels;
@@ -994,7 +1276,10 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
                GElf_Sym sym;
                GElf_Rel rel;
                unsigned int insn_idx;
+               unsigned int shdr_idx;
                struct bpf_insn *insns = prog->insns;
+               enum libbpf_map_type type;
+               const char *name;
                size_t map_idx;
 
                if (!gelf_getrel(data, i, &rel)) {
@@ -1009,13 +1294,18 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
                                   GELF_R_SYM(rel.r_info));
                        return -LIBBPF_ERRNO__FORMAT;
                }
-               pr_debug("relo for %lld value %lld name %d\n",
+
+               name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
+                                 sym.st_name) ? : "<?>";
+
+               pr_debug("relo for %lld value %lld name %d (\'%s\')\n",
                         (long long) (rel.r_info >> 32),
-                        (long long) sym.st_value, sym.st_name);
+                        (long long) sym.st_value, sym.st_name, name);
 
-               if (sym.st_shndx != maps_shndx && sym.st_shndx != text_shndx) {
-                       pr_warning("Program '%s' contains non-map related relo data pointing to section %u\n",
-                                  prog->section_name, sym.st_shndx);
+               shdr_idx = sym.st_shndx;
+               if (!bpf_object__relo_in_known_section(obj, shdr_idx)) {
+                       pr_warning("Program '%s' contains unrecognized relo data pointing to section %u\n",
+                                  prog->section_name, shdr_idx);
                        return -LIBBPF_ERRNO__RELOC;
                }
 
@@ -1040,24 +1330,39 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
                        return -LIBBPF_ERRNO__RELOC;
                }
 
-               /* TODO: 'maps' is sorted. We can use bsearch to make it faster. */
-               for (map_idx = 0; map_idx < nr_maps; map_idx++) {
-                       if (maps[map_idx].offset == sym.st_value) {
-                               pr_debug("relocation: find map %zd (%s) for insn %u\n",
-                                        map_idx, maps[map_idx].name, insn_idx);
-                               break;
+               if (bpf_object__shndx_is_maps(obj, shdr_idx) ||
+                   bpf_object__shndx_is_data(obj, shdr_idx)) {
+                       type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
+                       if (type != LIBBPF_MAP_UNSPEC &&
+                           GELF_ST_BIND(sym.st_info) == STB_GLOBAL) {
+                               pr_warning("bpf: relocation: not yet supported relo for non-static global \'%s\' variable found in insns[%d].code 0x%x\n",
+                                          name, insn_idx, insns[insn_idx].code);
+                               return -LIBBPF_ERRNO__RELOC;
                        }
-               }
 
-               if (map_idx >= nr_maps) {
-                       pr_warning("bpf relocation: map_idx %d large than %d\n",
-                                  (int)map_idx, (int)nr_maps - 1);
-                       return -LIBBPF_ERRNO__RELOC;
-               }
+                       for (map_idx = 0; map_idx < nr_maps; map_idx++) {
+                               if (maps[map_idx].libbpf_type != type)
+                                       continue;
+                               if (type != LIBBPF_MAP_UNSPEC ||
+                                   (type == LIBBPF_MAP_UNSPEC &&
+                                    maps[map_idx].offset == sym.st_value)) {
+                                       pr_debug("relocation: find map %zd (%s) for insn %u\n",
+                                                map_idx, maps[map_idx].name, insn_idx);
+                                       break;
+                               }
+                       }
+
+                       if (map_idx >= nr_maps) {
+                               pr_warning("bpf relocation: map_idx %d larger than %d\n",
+                                          (int)map_idx, (int)nr_maps - 1);
+                               return -LIBBPF_ERRNO__RELOC;
+                       }
 
-               prog->reloc_desc[i].type = RELO_LD64;
-               prog->reloc_desc[i].insn_idx = insn_idx;
-               prog->reloc_desc[i].map_idx = map_idx;
+                       prog->reloc_desc[i].type = type != LIBBPF_MAP_UNSPEC ?
+                                                  RELO_DATA : RELO_LD64;
+                       prog->reloc_desc[i].insn_idx = insn_idx;
+                       prog->reloc_desc[i].map_idx = map_idx;
+               }
        }
        return 0;
 }
@@ -1065,18 +1370,27 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
 static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf)
 {
        struct bpf_map_def *def = &map->def;
-       __u32 key_type_id, value_type_id;
+       __u32 key_type_id = 0, value_type_id = 0;
        int ret;
 
-       ret = btf__get_map_kv_tids(btf, map->name, def->key_size,
-                                  def->value_size, &key_type_id,
-                                  &value_type_id);
-       if (ret)
+       if (!bpf_map__is_internal(map)) {
+               ret = btf__get_map_kv_tids(btf, map->name, def->key_size,
+                                          def->value_size, &key_type_id,
+                                          &value_type_id);
+       } else {
+               /*
+                * LLVM annotates global data differently in BTF, that is,
+                * only as '.data', '.bss' or '.rodata'.
+                */
+               ret = btf__find_by_name(btf,
+                               libbpf_type_to_btf_name[map->libbpf_type]);
+       }
+       if (ret < 0)
                return ret;
 
        map->btf_key_type_id = key_type_id;
-       map->btf_value_type_id = value_type_id;
-
+       map->btf_value_type_id = bpf_map__is_internal(map) ?
+                                ret : value_type_id;
        return 0;
 }
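For an internal map the key type id stays 0 and the value type is resolved purely by section name, since LLVM emits the datasec under that name; the lookup reduces to roughly:

    /* returns the BTF type id for the ".rodata" datasec, or a negative error */
    int id = btf__find_by_name(btf, ".rodata");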
 
@@ -1187,6 +1501,34 @@ bpf_object__probe_caps(struct bpf_object *obj)
        return bpf_object__probe_name(obj);
 }
 
+static int
+bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
+{
+       char *cp, errmsg[STRERR_BUFSIZE];
+       int err, zero = 0;
+       __u8 *data;
+
+       /* Nothing to do here since the kernel already zero-initializes the .bss map. */
+       if (map->libbpf_type == LIBBPF_MAP_BSS)
+               return 0;
+
+       data = map->libbpf_type == LIBBPF_MAP_DATA ?
+              obj->sections.data : obj->sections.rodata;
+
+       err = bpf_map_update_elem(map->fd, &zero, data, 0);
+       /* Freeze .rodata map as read-only from syscall side. */
+       if (!err && map->libbpf_type == LIBBPF_MAP_RODATA) {
+               err = bpf_map_freeze(map->fd);
+               if (err) {
+                       cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
+                       pr_warning("Error freezing map(%s) as read-only: %s\n",
+                                  map->name, cp);
+                       err = 0;
+               }
+       }
+       return err;
+}
+
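Freezing makes the map immutable from the syscall side; combined with BPF_F_RDONLY_PROG set at creation time, .rodata ends up read-only everywhere. A hedged sketch of the observable effect, assuming rodata_fd refers to the frozen map and a 64-byte section:

    char buf[64] = {};      /* must match the map's value_size, i.e. the section size */
    int zero = 0;

    /* worked before bpf_map_freeze(); now fails and sets errno to EPERM */
    if (bpf_map_update_elem(rodata_fd, &zero, buf, 0) < 0)
            perror("update on frozen .rodata");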
 static int
 bpf_object__create_maps(struct bpf_object *obj)
 {
@@ -1244,6 +1586,7 @@ bpf_object__create_maps(struct bpf_object *obj)
                        size_t j;
 
                        err = *pfd;
+err_out:
                        cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
                        pr_warning("failed to create map (name: '%s'): %s\n",
                                   map->name, cp);
@@ -1251,6 +1594,15 @@ bpf_object__create_maps(struct bpf_object *obj)
                                zclose(obj->maps[j].fd);
                        return err;
                }
+
+               if (bpf_map__is_internal(map)) {
+                       err = bpf_object__populate_internal_map(obj, map);
+                       if (err < 0) {
+                               zclose(*pfd);
+                               goto err_out;
+                       }
+               }
+
                pr_debug("create map %s: fd=%d\n", map->name, *pfd);
        }
 
@@ -1405,21 +1757,29 @@ bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
                return 0;
 
        for (i = 0; i < prog->nr_reloc; i++) {
-               if (prog->reloc_desc[i].type == RELO_LD64) {
+               if (prog->reloc_desc[i].type == RELO_LD64 ||
+                   prog->reloc_desc[i].type == RELO_DATA) {
+                       bool relo_data = prog->reloc_desc[i].type == RELO_DATA;
                        struct bpf_insn *insns = prog->insns;
                        int insn_idx, map_idx;
 
                        insn_idx = prog->reloc_desc[i].insn_idx;
                        map_idx = prog->reloc_desc[i].map_idx;
 
-                       if (insn_idx >= (int)prog->insns_cnt) {
+                       if (insn_idx + 1 >= (int)prog->insns_cnt) {
                                pr_warning("relocation out of range: '%s'\n",
                                           prog->section_name);
                                return -LIBBPF_ERRNO__RELOC;
                        }
-                       insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
+
+                       if (!relo_data) {
+                               insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
+                       } else {
+                               insns[insn_idx].src_reg = BPF_PSEUDO_MAP_VALUE;
+                               insns[insn_idx + 1].imm = insns[insn_idx].imm;
+                       }
                        insns[insn_idx].imm = obj->maps[map_idx].fd;
-               } else {
+               } else if (prog->reloc_desc[i].type == RELO_CALL) {
                        err = bpf_program__reloc_text(prog, obj,
                                                      &prog->reloc_desc[i]);
                        if (err)
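The RELO_DATA case above patches a ld_imm64 pair: the compiler left the variable's offset within its section in the first half's imm field; libbpf moves that offset into the second half, flags the load as BPF_PSEUDO_MAP_VALUE and stores the backing map's fd in the first imm, letting the verifier translate the pair into a map-value address plus offset. The same fixup in isolation (a sketch; idx is the index of the ld_imm64's first half):

    #include <linux/bpf.h>

    static void relo_data_fixup(struct bpf_insn *insns, int idx, int map_fd)
    {
            insns[idx].src_reg = BPF_PSEUDO_MAP_VALUE;
            insns[idx + 1].imm = insns[idx].imm;    /* carry the section offset */
            insns[idx].imm     = map_fd;            /* point at the backing map */
    }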
@@ -1494,6 +1854,7 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
 {
        struct bpf_load_program_attr load_attr;
        char *cp, errmsg[STRERR_BUFSIZE];
+       int log_buf_size = BPF_LOG_BUF_SIZE;
        char *log_buf;
        int ret;
 
@@ -1514,21 +1875,30 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
        load_attr.line_info = prog->line_info;
        load_attr.line_info_rec_size = prog->line_info_rec_size;
        load_attr.line_info_cnt = prog->line_info_cnt;
+       load_attr.log_level = prog->log_level;
        if (!load_attr.insns || !load_attr.insns_cnt)
                return -EINVAL;
 
-       log_buf = malloc(BPF_LOG_BUF_SIZE);
+retry_load:
+       log_buf = malloc(log_buf_size);
        if (!log_buf)
                pr_warning("Alloc log buffer for bpf loader error, continue without log\n");
 
-       ret = bpf_load_program_xattr(&load_attr, log_buf, BPF_LOG_BUF_SIZE);
+       ret = bpf_load_program_xattr(&load_attr, log_buf, log_buf_size);
 
        if (ret >= 0) {
+               if (load_attr.log_level)
+                       pr_debug("verifier log:\n%s", log_buf);
                *pfd = ret;
                ret = 0;
                goto out;
        }
 
+       if (errno == ENOSPC) {
+               log_buf_size <<= 1;
+               free(log_buf);
+               goto retry_load;
+       }
        ret = -LIBBPF_ERRNO__LOAD;
        cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
        pr_warning("load bpf program failed: %s\n", cp);
@@ -2303,6 +2673,9 @@ void bpf_object__close(struct bpf_object *obj)
                obj->maps[i].priv = NULL;
                obj->maps[i].clear_priv = NULL;
        }
+
+       zfree(&obj->sections.rodata);
+       zfree(&obj->sections.data);
        zfree(&obj->maps);
        obj->nr_maps = 0;
 
@@ -2780,6 +3153,11 @@ bool bpf_map__is_offload_neutral(struct bpf_map *map)
        return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
 }
 
+bool bpf_map__is_internal(struct bpf_map *map)
+{
+       return map->libbpf_type != LIBBPF_MAP_UNSPEC;
+}
+
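A usage sketch for the new predicate: callers walking an opened object's maps can now skip the generated backing maps, e.g. when pinning (obj is an opened struct bpf_object *):

    struct bpf_map *map;

    for (map = bpf_map__next(NULL, obj); map; map = bpf_map__next(map, obj)) {
            if (bpf_map__is_internal(map))
                    continue;       /* .data/.bss/.rodata backing map */
            printf("user-defined map: %s\n", bpf_map__name(map));
    }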
 void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
 {
        map->map_ifindex = ifindex;
@@ -2938,6 +3316,7 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
                bpf_program__set_expected_attach_type(prog,
                                                      expected_attach_type);
 
+               prog->log_level = attr->log_level;
                if (!first_prog)
                        first_prog = prog;
        }
index c70785cc8ef560165e323abcf9e83f8cc05eec31..c5ff00515ce7d606a1c68ba119cdc091a3731e00 100644 (file)
@@ -75,6 +75,10 @@ struct bpf_object *__bpf_object__open_xattr(struct bpf_object_open_attr *attr,
 LIBBPF_API struct bpf_object *bpf_object__open_buffer(void *obj_buf,
                                                      size_t obj_buf_sz,
                                                      const char *name);
+int bpf_object__section_size(const struct bpf_object *obj, const char *name,
+                            __u32 *size);
+int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
+                               __u32 *off);
 LIBBPF_API int bpf_object__pin_maps(struct bpf_object *obj, const char *path);
 LIBBPF_API int bpf_object__unpin_maps(struct bpf_object *obj,
                                      const char *path);
@@ -301,6 +305,7 @@ LIBBPF_API void *bpf_map__priv(struct bpf_map *map);
 LIBBPF_API int bpf_map__reuse_fd(struct bpf_map *map, int fd);
 LIBBPF_API int bpf_map__resize(struct bpf_map *map, __u32 max_entries);
 LIBBPF_API bool bpf_map__is_offload_neutral(struct bpf_map *map);
+LIBBPF_API bool bpf_map__is_internal(struct bpf_map *map);
 LIBBPF_API void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex);
 LIBBPF_API int bpf_map__pin(struct bpf_map *map, const char *path);
 LIBBPF_API int bpf_map__unpin(struct bpf_map *map, const char *path);
@@ -314,6 +319,7 @@ struct bpf_prog_load_attr {
        enum bpf_prog_type prog_type;
        enum bpf_attach_type expected_attach_type;
        int ifindex;
+       int log_level;
 };
 
 LIBBPF_API int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
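A hedged usage sketch of the new field ("xdp_prog.o" is a placeholder path):

    struct bpf_prog_load_attr attr = {
            .file      = "xdp_prog.o",          /* placeholder object file */
            .prog_type = BPF_PROG_TYPE_XDP,
            .log_level = 1,                     /* request verifier output */
    };
    struct bpf_object *obj;
    int prog_fd;

    if (bpf_prog_load_xattr(&attr, &obj, &prog_fd))
            return -1;
    /* with log_level set, libbpf pr_debug()s the verifier log per program */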
index f3ce50500cf2985edae7202b5dbf09b63c402d3f..673001787cba26f32572ae2e8dc2dc0a8bc9663f 100644 (file)
@@ -157,3 +157,10 @@ LIBBPF_0.0.2 {
                bpf_program__bpil_addr_to_offs;
                bpf_program__bpil_offs_to_addr;
 } LIBBPF_0.0.1;
+
+LIBBPF_0.0.3 {
+       global:
+               bpf_map__is_internal;
+               bpf_map_freeze;
+               btf__finalize_data;
+} LIBBPF_0.0.2;
diff --git a/tools/lib/bpf/libbpf.pc.template b/tools/lib/bpf/libbpf.pc.template
new file mode 100644 (file)
index 0000000..ac17fce
--- /dev/null
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+
+prefix=@PREFIX@
+libdir=@LIBDIR@
+includedir=${prefix}/include
+
+Name: libbpf
+Description: BPF library
+Version: @VERSION@
+Libs: -L${libdir} -lbpf
+Requires.private: libelf
+Cflags: -I${includedir}
index 8d0078b65486f45730f3d967c84a6709afb23cdb..557ef8d1250dfb33f42ebfbe7acdcd028362cec8 100644 (file)
@@ -259,7 +259,8 @@ int xsk_umem__create(struct xsk_umem **umem_ptr, void *umem_area, __u64 size,
 
 static int xsk_load_xdp_prog(struct xsk_socket *xsk)
 {
-       char bpf_log_buf[BPF_LOG_BUF_SIZE];
+       static const int log_buf_size = 16 * 1024;
+       char log_buf[log_buf_size];
        int err, prog_fd;
 
        /* This is the C-program:
@@ -308,10 +309,10 @@ static int xsk_load_xdp_prog(struct xsk_socket *xsk)
        size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);
 
        prog_fd = bpf_load_program(BPF_PROG_TYPE_XDP, prog, insns_cnt,
-                                  "LGPL-2.1 or BSD-2-Clause", 0, bpf_log_buf,
-                                  BPF_LOG_BUF_SIZE);
+                                  "LGPL-2.1 or BSD-2-Clause", 0, log_buf,
+                                  log_buf_size);
        if (prog_fd < 0) {
-               pr_warning("BPF log buffer:\n%s", bpf_log_buf);
+               pr_warning("BPF log buffer:\n%s", log_buf);
                return prog_fd;
        }
 
index c9d038f91af6b345044bb116680a66f140d2ead1..53f8be0f4a1f763e613b649aeac98399bd34eb69 100644 (file)
@@ -25,14 +25,17 @@ LIBSUBCMD           = $(LIBSUBCMD_OUTPUT)libsubcmd.a
 OBJTOOL    := $(OUTPUT)objtool
 OBJTOOL_IN := $(OBJTOOL)-in.o
 
+LIBELF_FLAGS := $(shell pkg-config libelf --cflags 2>/dev/null)
+LIBELF_LIBS  := $(shell pkg-config libelf --libs 2>/dev/null || echo -lelf)
+
 all: $(OBJTOOL)
 
 INCLUDES := -I$(srctree)/tools/include \
            -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \
            -I$(srctree)/tools/objtool/arch/$(ARCH)/include
 WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed
-CFLAGS   += -Werror $(WARNINGS) $(KBUILD_HOSTCFLAGS) -g $(INCLUDES)
-LDFLAGS  += -lelf $(LIBSUBCMD) $(KBUILD_HOSTLDFLAGS)
+CFLAGS   += -Werror $(WARNINGS) $(KBUILD_HOSTCFLAGS) -g $(INCLUDES) $(LIBELF_FLAGS)
+LDFLAGS  += $(LIBELF_LIBS) $(LIBSUBCMD) $(KBUILD_HOSTLDFLAGS)
 
 # Allow old libelf to be used:
 elfshdr := $(shell echo '$(pound)include <libelf.h>' | $(CC) $(CFLAGS) -x c -E - | grep elf_getshdr)
index 5dde107083c60bc8dd12538a2838de73c8b1470c..479196aeb4096efb0f0c03f6722cefcb1765326e 100644 (file)
@@ -165,6 +165,7 @@ static int __dead_end_function(struct objtool_file *file, struct symbol *func,
                "fortify_panic",
                "usercopy_abort",
                "machine_real_restart",
+               "rewind_stack_do_exit",
        };
 
        if (func->bind == STB_WEAK)
index 01f7555fd93369a60d8abe6bee7198eb1cbd3e12..e8c9f77e9010748fc81695965441439cd15780c5 100644 (file)
@@ -481,8 +481,8 @@ $(madvise_behavior_array): $(madvise_hdr_dir)/mman-common.h $(madvise_behavior_t
 mmap_flags_array := $(beauty_outdir)/mmap_flags_array.c
 mmap_flags_tbl := $(srctree)/tools/perf/trace/beauty/mmap_flags.sh
 
-$(mmap_flags_array): $(asm_generic_uapi_dir)/mman.h $(asm_generic_uapi_dir)/mman-common.h $(mmap_flags_tbl)
-       $(Q)$(SHELL) '$(mmap_flags_tbl)' $(asm_generic_uapi_dir) $(arch_asm_uapi_dir) > $@
+$(mmap_flags_array): $(linux_uapi_dir)/mman.h $(asm_generic_uapi_dir)/mman.h $(asm_generic_uapi_dir)/mman-common.h $(mmap_flags_tbl)
+       $(Q)$(SHELL) '$(mmap_flags_tbl)' $(linux_uapi_dir) $(asm_generic_uapi_dir) $(arch_asm_uapi_dir) > $@
 
 mount_flags_array := $(beauty_outdir)/mount_flags_array.c
 mount_flags_tbl := $(srctree)/tools/perf/trace/beauty/mount_flags.sh
index 2ae92fddb6d5f336de25e36e61b8775e0b797c50..92ee0b4378d4c23b9ac3d22d92d4637efb187f94 100644 (file)
 334    common  rseq                    __x64_sys_rseq
 # don't use numbers 387 through 423, add new calls after the last
 # 'common' entry
+424    common  pidfd_send_signal       __x64_sys_pidfd_send_signal
+425    common  io_uring_setup          __x64_sys_io_uring_setup
+426    common  io_uring_enter          __x64_sys_io_uring_enter
+427    common  io_uring_register       __x64_sys_io_uring_register
 
 #
 # x32-specific system call numbers start at 512 to avoid cache impact
index 7b55613924ded7a0f965c80799b1d9230bc320d9..c68ee06cae637fe5aad2c51e2dba1e16af0b5064 100755 (executable)
@@ -103,7 +103,7 @@ done
 # diff with extra ignore lines
 check arch/x86/lib/memcpy_64.S        '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>"'
 check arch/x86/lib/memset_64.S        '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>"'
-check include/uapi/asm-generic/mman.h '-I "^#include <\(uapi/\)*asm-generic/mman-common.h>"'
+check include/uapi/asm-generic/mman.h '-I "^#include <\(uapi/\)*asm-generic/mman-common\(-tools\)*.h>"'
 check include/uapi/linux/mman.h       '-I "^#include <\(uapi/\)*asm/mman.h>"'
 
 # diff non-symmetric files
index e38518cdcbc3779c8e001b7c4de969942dd7a5ab..74ef92f1d19ad6637140829497997cc2cf8b686e 100755 (executable)
@@ -107,6 +107,7 @@ import os
 from PySide.QtCore import *
 from PySide.QtGui import *
 from PySide.QtSql import *
+pyside_version_1 = True
 from decimal import *
 from ctypes import *
 from multiprocessing import Process, Array, Value, Event
@@ -1526,6 +1527,19 @@ def BranchDataPrep(query):
                        " (" + dsoname(query.value(15)) + ")")
        return data
 
+def BranchDataPrepWA(query):
+       data = []
+       data.append(query.value(0))
+       # Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string
+       data.append("{:>19}".format(query.value(1)))
+       for i in xrange(2, 8):
+               data.append(query.value(i))
+       data.append(tohex(query.value(8)).rjust(16) + " " + query.value(9) + offstr(query.value(10)) +
+                       " (" + dsoname(query.value(11)) + ")" + " -> " +
+                       tohex(query.value(12)) + " " + query.value(13) + offstr(query.value(14)) +
+                       " (" + dsoname(query.value(15)) + ")")
+       return data
+
 # Branch data model
 
 class BranchModel(TreeModel):
@@ -1553,7 +1567,11 @@ class BranchModel(TreeModel):
                        " AND evsel_id = " + str(self.event_id) +
                        " ORDER BY samples.id"
                        " LIMIT " + str(glb_chunk_sz))
-               self.fetcher = SQLFetcher(glb, sql, BranchDataPrep, self.AddSample)
+               if pyside_version_1 and sys.version_info[0] == 3:
+                       prep = BranchDataPrepWA
+               else:
+                       prep = BranchDataPrep
+               self.fetcher = SQLFetcher(glb, sql, prep, self.AddSample)
                self.fetcher.done.connect(self.Update)
                self.fetcher.Fetch(glb_chunk_sz)
 
@@ -2079,14 +2097,6 @@ def IsSelectable(db, table, sql = ""):
                return False
        return True
 
-# SQL data preparation
-
-def SQLTableDataPrep(query, count):
-       data = []
-       for i in xrange(count):
-               data.append(query.value(i))
-       return data
-
 # SQL table data model item
 
 class SQLTableItem():
@@ -2110,7 +2120,7 @@ class SQLTableModel(TableModel):
                self.more = True
                self.populated = 0
                self.column_headers = column_headers
-               self.fetcher = SQLFetcher(glb, sql, lambda x, y=len(column_headers): SQLTableDataPrep(x, y), self.AddSample)
+               self.fetcher = SQLFetcher(glb, sql, lambda x, y=len(column_headers): self.SQLTableDataPrep(x, y), self.AddSample)
                self.fetcher.done.connect(self.Update)
                self.fetcher.Fetch(glb_chunk_sz)
 
@@ -2154,6 +2164,12 @@ class SQLTableModel(TableModel):
        def columnHeader(self, column):
                return self.column_headers[column]
 
+       def SQLTableDataPrep(self, query, count):
+               data = []
+               for i in xrange(count):
+                       data.append(query.value(i))
+               return data
+
 # SQL automatic table data model
 
 class SQLAutoTableModel(SQLTableModel):
@@ -2182,8 +2198,32 @@ class SQLAutoTableModel(SQLTableModel):
                        QueryExec(query, "SELECT column_name FROM information_schema.columns WHERE table_schema = '" + schema + "' and table_name = '" + select_table_name + "'")
                        while query.next():
                                column_headers.append(query.value(0))
+               if pyside_version_1 and sys.version_info[0] == 3:
+                       if table_name == "samples_view":
+                               self.SQLTableDataPrep = self.samples_view_DataPrep
+                       if table_name == "samples":
+                               self.SQLTableDataPrep = self.samples_DataPrep
                super(SQLAutoTableModel, self).__init__(glb, sql, column_headers, parent)
 
+       def samples_view_DataPrep(self, query, count):
+               data = []
+               data.append(query.value(0))
+               # Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string
+               data.append("{:>19}".format(query.value(1)))
+               for i in xrange(2, count):
+                       data.append(query.value(i))
+               return data
+
+       def samples_DataPrep(self, query, count):
+               data = []
+               for i in xrange(9):
+                       data.append(query.value(i))
+               # Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string
+               data.append("{:>19}".format(query.value(9)))
+               for i in xrange(10, count):
+                       data.append(query.value(i))
+               return data
+
 # Base class for custom ResizeColumnsToContents
 
 class ResizeColumnsToContentsBase(QObject):
@@ -2868,9 +2908,13 @@ class LibXED():
                ok = self.xed_format_context(2, inst.xedp, inst.bufferp, sizeof(inst.buffer), ip, 0, 0)
                if not ok:
                        return 0, ""
+               if sys.version_info[0] == 2:
+                       result = inst.buffer.value
+               else:
+                       result = inst.buffer.value.decode()
                # Return instruction length and the disassembled instruction text
                # For now, assume the length is in byte 166
-               return inst.xedd[166], inst.buffer.value
+               return inst.xedd[166], result
 
 def TryOpen(file_name):
        try:
@@ -2886,9 +2930,14 @@ def Is64Bit(f):
        header = f.read(7)
        f.seek(pos)
        magic = header[0:4]
-       eclass = ord(header[4])
-       encoding = ord(header[5])
-       version = ord(header[6])
+       if sys.version_info[0] == 2:
+               eclass = ord(header[4])
+               encoding = ord(header[5])
+               version = ord(header[6])
+       else:
+               eclass = header[4]
+               encoding = header[5]
+               version = header[6]
        if magic == chr(127) + "ELF" and eclass > 0 and eclass < 3 and encoding > 0 and encoding < 3 and version == 1:
                result = True if eclass == 2 else False
        return result
index 32bac9c0d6947e68b65a240cd12bab853a8b6bfe..5f5eefcb3c748c13a7fe8a3cd9cd0abba9a1c538 100755 (executable)
@@ -1,15 +1,18 @@
 #!/bin/sh
 # SPDX-License-Identifier: LGPL-2.1
 
-if [ $# -ne 2 ] ; then
+if [ $# -ne 3 ] ; then
        [ $# -eq 1 ] && hostarch=$1 || hostarch=`uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/`
+       linux_header_dir=tools/include/uapi/linux
        header_dir=tools/include/uapi/asm-generic
        arch_header_dir=tools/arch/${hostarch}/include/uapi/asm
 else
-       header_dir=$1
-       arch_header_dir=$2
+       linux_header_dir=$1
+       header_dir=$2
+       arch_header_dir=$3
 fi
 
+linux_mman=${linux_header_dir}/mman.h
 arch_mman=${arch_header_dir}/mman.h
 
 # those in egrep -vw are flags, we want just the bits
@@ -20,6 +23,11 @@ egrep -q $regex ${arch_mman} && \
 (egrep $regex ${arch_mman} | \
        sed -r "s/$regex/\2 \1/g"       | \
        xargs printf "\t[ilog2(%s) + 1] = \"%s\",\n")
+egrep -q $regex ${linux_mman} && \
+(egrep $regex ${linux_mman} | \
+       egrep -vw 'MAP_(UNINITIALIZED|TYPE|SHARED_VALIDATE)' | \
+       sed -r "s/$regex/\2 \1/g"       | \
+       xargs printf "\t[ilog2(%s) + 1] = \"%s\",\n")
 ([ ! -f ${arch_mman} ] || egrep -q '#[[:space:]]*include[[:space:]]+<uapi/asm-generic/mman.*' ${arch_mman}) &&
 (egrep $regex ${header_dir}/mman-common.h | \
        egrep -vw 'MAP_(UNINITIALIZED|TYPE|SHARED_VALIDATE)' | \
index ba4c623cd8de0d00f89ba452efe5b91569c9c794..39fe21e1cf930108adeec730cabc77ca76bc7f78 100644 (file)
@@ -387,6 +387,7 @@ cs_etm_decoder__buffer_range(struct cs_etm_decoder *decoder,
                break;
        case OCSD_INSTR_ISB:
        case OCSD_INSTR_DSB_DMB:
+       case OCSD_INSTR_WFI_WFE:
        case OCSD_INSTR_OTHER:
        default:
                packet->last_instr_taken_branch = false;
index ec78e93085ded81aa9def49640b48495b93ead7c..6689378ee577c18ca1efac4b95f84c5c45d40404 100644 (file)
@@ -231,35 +231,6 @@ void perf_evlist__set_leader(struct perf_evlist *evlist)
        }
 }
 
-void perf_event_attr__set_max_precise_ip(struct perf_event_attr *pattr)
-{
-       struct perf_event_attr attr = {
-               .type           = PERF_TYPE_HARDWARE,
-               .config         = PERF_COUNT_HW_CPU_CYCLES,
-               .exclude_kernel = 1,
-               .precise_ip     = 3,
-       };
-
-       event_attr_init(&attr);
-
-       /*
-        * Unnamed union member, not supported as struct member named
-        * initializer in older compilers such as gcc 4.4.7
-        */
-       attr.sample_period = 1;
-
-       while (attr.precise_ip != 0) {
-               int fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
-               if (fd != -1) {
-                       close(fd);
-                       break;
-               }
-               --attr.precise_ip;
-       }
-
-       pattr->precise_ip = attr.precise_ip;
-}
-
 int __perf_evlist__add_default(struct perf_evlist *evlist, bool precise)
 {
        struct perf_evsel *evsel = perf_evsel__new_cycles(precise);
index dcb68f34d2cd1a3cb4460adbda76eef879f9282e..6a94785b9100745af7d5cd75de10387fff70ab32 100644 (file)
@@ -315,8 +315,6 @@ void perf_evlist__to_front(struct perf_evlist *evlist,
 void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
                                     struct perf_evsel *tracking_evsel);
 
-void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr);
-
 struct perf_evsel *
 perf_evlist__find_evsel_by_str(struct perf_evlist *evlist, const char *str);
 
index 7835e05f0c0a476c4d782a98dc8de5fb4494d55b..66d066f18b5b2de290a3cc4c4a1c1caa29baaeb8 100644 (file)
@@ -295,7 +295,6 @@ struct perf_evsel *perf_evsel__new_cycles(bool precise)
        if (!precise)
                goto new_event;
 
-       perf_event_attr__set_max_precise_ip(&attr);
        /*
         * Now let the usual logic to set up the perf_event_attr defaults
         * to kick in when we return and before perf_evsel__open() is called.
@@ -305,6 +304,8 @@ struct perf_evsel *perf_evsel__new_cycles(bool precise)
        if (evsel == NULL)
                goto out;
 
+       evsel->precise_max = true;
+
        /* use asprintf() because free(evsel) assumes name is allocated */
        if (asprintf(&evsel->name, "cycles%s%s%.*s",
                     (attr.precise_ip || attr.exclude_kernel) ? ":" : "",
@@ -1083,7 +1084,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
        }
 
        if (evsel->precise_max)
-               perf_event_attr__set_max_precise_ip(attr);
+               attr->precise_ip = 3;
 
        if (opts->all_user) {
                attr->exclude_kernel = 1;
@@ -1749,6 +1750,59 @@ static bool ignore_missing_thread(struct perf_evsel *evsel,
        return true;
 }
 
+static void display_attr(struct perf_event_attr *attr)
+{
+       if (verbose >= 2) {
+               fprintf(stderr, "%.60s\n", graph_dotted_line);
+               fprintf(stderr, "perf_event_attr:\n");
+               perf_event_attr__fprintf(stderr, attr, __open_attr__fprintf, NULL);
+               fprintf(stderr, "%.60s\n", graph_dotted_line);
+       }
+}
+
+static int perf_event_open(struct perf_evsel *evsel,
+                          pid_t pid, int cpu, int group_fd,
+                          unsigned long flags)
+{
+       int precise_ip = evsel->attr.precise_ip;
+       int fd;
+
+       while (1) {
+               pr_debug2("sys_perf_event_open: pid %d  cpu %d  group_fd %d  flags %#lx",
+                         pid, cpu, group_fd, flags);
+
+               fd = sys_perf_event_open(&evsel->attr, pid, cpu, group_fd, flags);
+               if (fd >= 0)
+                       break;
+
+               /*
+                * Do quick precise_ip fallback if:
+                *  - there is precise_ip set in perf_event_attr
+                *  - maximum precise is requested
+                *  - sys_perf_event_open failed with ENOTSUP error,
+                *    which is associated with wrong precise_ip
+                */
+               if (!precise_ip || !evsel->precise_max || (errno != ENOTSUP))
+                       break;
+
+               /*
+                * We tried all the precise_ip values, and it's
+                * still failing, so leave it to standard fallback.
+                */
+               if (!evsel->attr.precise_ip) {
+                       evsel->attr.precise_ip = precise_ip;
+                       break;
+               }
+
+               pr_debug2("\nsys_perf_event_open failed, error %d\n", -ENOTSUP);
+               evsel->attr.precise_ip--;
+               pr_debug2("decreasing precise_ip by one (%d)\n", evsel->attr.precise_ip);
+               display_attr(&evsel->attr);
+       }
+
+       return fd;
+}
+
 int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
                     struct thread_map *threads)
 {
@@ -1824,12 +1878,7 @@ int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
        if (perf_missing_features.sample_id_all)
                evsel->attr.sample_id_all = 0;
 
-       if (verbose >= 2) {
-               fprintf(stderr, "%.60s\n", graph_dotted_line);
-               fprintf(stderr, "perf_event_attr:\n");
-               perf_event_attr__fprintf(stderr, &evsel->attr, __open_attr__fprintf, NULL);
-               fprintf(stderr, "%.60s\n", graph_dotted_line);
-       }
+       display_attr(&evsel->attr);
 
        for (cpu = 0; cpu < cpus->nr; cpu++) {
 
@@ -1841,13 +1890,10 @@ int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
 
                        group_fd = get_group_fd(evsel, cpu, thread);
 retry_open:
-                       pr_debug2("sys_perf_event_open: pid %d  cpu %d  group_fd %d  flags %#lx",
-                                 pid, cpus->map[cpu], group_fd, flags);
-
                        test_attr__ready();
 
-                       fd = sys_perf_event_open(&evsel->attr, pid, cpus->map[cpu],
-                                                group_fd, flags);
+                       fd = perf_event_open(evsel, pid, cpus->map[cpu],
+                                            group_fd, flags);
 
                        FD(evsel, cpu, thread) = fd;
 
index 6e03db142091badf132f7a19869c76180f1075c4..872fab163585ac9dcf6b42ab7d95e8cae651714b 100644 (file)
@@ -251,19 +251,15 @@ struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params)
                if (!(decoder->tsc_ctc_ratio_n % decoder->tsc_ctc_ratio_d))
                        decoder->tsc_ctc_mult = decoder->tsc_ctc_ratio_n /
                                                decoder->tsc_ctc_ratio_d;
-
-               /*
-                * Allow for timestamps appearing to backwards because a TSC
-                * packet has slipped past a MTC packet, so allow 2 MTC ticks
-                * or ...
-                */
-               decoder->tsc_slip = multdiv(2 << decoder->mtc_shift,
-                                       decoder->tsc_ctc_ratio_n,
-                                       decoder->tsc_ctc_ratio_d);
        }
-       /* ... or 0x100 paranoia */
-       if (decoder->tsc_slip < 0x100)
-               decoder->tsc_slip = 0x100;
+
+       /*
+        * A TSC packet can slip past MTC packets so that the timestamp appears
+        * to go backwards. One estimate is that it can be up to about 40 CPU
+        * cycles, which is certainly less than 0x1000 TSC ticks, but accept
+        * slippage an order of magnitude more to be on the safe side.
+        */
+       decoder->tsc_slip = 0x10000;
 
        intel_pt_log("timestamp: mtc_shift %u\n", decoder->mtc_shift);
        intel_pt_log("timestamp: tsc_ctc_ratio_n %u\n", decoder->tsc_ctc_ratio_n);
index 61959aba7e27008b68bd4cde372b15f1bcaefe39..3c520baa198cfcc49276ca77a8f8c12d058bec4d 100644 (file)
@@ -1421,6 +1421,20 @@ static void machine__set_kernel_mmap(struct machine *machine,
                machine->vmlinux_map->end = ~0ULL;
 }
 
+static void machine__update_kernel_mmap(struct machine *machine,
+                                    u64 start, u64 end)
+{
+       struct map *map = machine__kernel_map(machine);
+
+       map__get(map);
+       map_groups__remove(&machine->kmaps, map);
+
+       machine__set_kernel_mmap(machine, start, end);
+
+       map_groups__insert(&machine->kmaps, map);
+       map__put(map);
+}
+
 int machine__create_kernel_maps(struct machine *machine)
 {
        struct dso *kernel = machine__get_kernel(machine);
@@ -1453,17 +1467,11 @@ int machine__create_kernel_maps(struct machine *machine)
                        goto out_put;
                }
 
-               /* we have a real start address now, so re-order the kmaps */
-               map = machine__kernel_map(machine);
-
-               map__get(map);
-               map_groups__remove(&machine->kmaps, map);
-
-               /* assume it's the last in the kmaps */
-               machine__set_kernel_mmap(machine, addr, ~0ULL);
-
-               map_groups__insert(&machine->kmaps, map);
-               map__put(map);
+               /*
+                * We have a real start address now, so re-order the kmaps;
+                * assume it's the last in the kmaps.
+                */
+               machine__update_kernel_mmap(machine, addr, ~0ULL);
        }
 
        if (machine__create_extra_kernel_maps(machine, kernel))
@@ -1599,7 +1607,7 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
                if (strstr(kernel->long_name, "vmlinux"))
                        dso__set_short_name(kernel, "[kernel.vmlinux]", false);
 
-               machine__set_kernel_mmap(machine, event->mmap.start,
+               machine__update_kernel_mmap(machine, event->mmap.start,
                                         event->mmap.start + event->mmap.len);
 
                /*
index 6199a3174ab95eccd1c135dc7d477161296f9883..e0429f4ef335896a9ad393f1e3e3f30b4a4acc9f 100644 (file)
@@ -732,10 +732,20 @@ static void pmu_add_cpu_aliases(struct list_head *head, struct perf_pmu *pmu)
 
                if (!is_arm_pmu_core(name)) {
                        pname = pe->pmu ? pe->pmu : "cpu";
+
+                       /*
+                        * an uncore alias may come from a different PMU
+                        * with a common prefix
+                        */
+                       if (pmu_is_uncore(name) &&
+                           !strncmp(pname, name, strlen(pname)))
+                               goto new_alias;
+
                        if (strcmp(pname, name))
                                continue;
                }
 
+new_alias:
                /* need type casts to override 'const' */
                __perf_pmu__new_alias(head, NULL, (char *)pe->name,
                                (char *)pe->desc, (char *)pe->event,
index c3fad065c89c085b39da83de4a751041a99ae3d6..c7727be9719f4ea9b9524ddf1ff92daeebab8982 100644 (file)
@@ -44,6 +44,7 @@
 #include <cpuid.h>
 #include <linux/capability.h>
 #include <errno.h>
+#include <math.h>
 
 char *proc_stat = "/proc/stat";
 FILE *outf;
@@ -63,7 +64,6 @@ unsigned int dump_only;
 unsigned int do_snb_cstates;
 unsigned int do_knl_cstates;
 unsigned int do_slm_cstates;
-unsigned int do_cnl_cstates;
 unsigned int use_c1_residency_msr;
 unsigned int has_aperf;
 unsigned int has_epb;
@@ -141,9 +141,21 @@ unsigned int first_counter_read = 1;
 
 #define RAPL_CORES_ENERGY_STATUS       (1 << 9)
                                        /* 0x639 MSR_PP0_ENERGY_STATUS */
+#define RAPL_PER_CORE_ENERGY   (1 << 10)
+                                       /* Indicates that core energy collection is
+                                        * per-core rather than per-package. */
+#define RAPL_AMD_F17H          (1 << 11)
+                                       /* 0xc0010299 MSR_RAPL_PWR_UNIT */
+                                       /* 0xc001029a MSR_CORE_ENERGY_STAT */
+                                       /* 0xc001029b MSR_PKG_ENERGY_STAT */
 #define RAPL_CORES (RAPL_CORES_ENERGY_STATUS | RAPL_CORES_POWER_LIMIT)
 #define        TJMAX_DEFAULT   100
 
+/* MSRs that are not yet in the kernel-provided header. */
+#define MSR_RAPL_PWR_UNIT      0xc0010299
+#define MSR_CORE_ENERGY_STAT   0xc001029a
+#define MSR_PKG_ENERGY_STAT    0xc001029b
+
 #define MAX(a, b) ((a) > (b) ? (a) : (b))
 
 /*
@@ -187,6 +199,7 @@ struct core_data {
        unsigned long long c7;
        unsigned long long mc6_us;      /* duplicate as per-core for now, even though per module */
        unsigned int core_temp_c;
+       unsigned int core_energy;       /* MSR_CORE_ENERGY_STAT */
        unsigned int core_id;
        unsigned long long counter[MAX_ADDED_COUNTERS];
 } *core_even, *core_odd;
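These MSRs are reached through the standard msr(4) character device; a self-contained sketch (not turbostat's own get_msr() helper) of reading the new per-core energy counter on one CPU:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    #define MSR_CORE_ENERGY_STAT    0xc001029a

    static int read_msr(int cpu, uint32_t reg, uint64_t *val)
    {
            char path[32];
            int fd, ret = -1;

            snprintf(path, sizeof(path), "/dev/cpu/%d/msr", cpu);
            fd = open(path, O_RDONLY);
            if (fd < 0)
                    return -1;
            /* the msr driver maps the file offset onto the MSR address */
            if (pread(fd, val, sizeof(*val), reg) == sizeof(*val))
                    ret = 0;
            close(fd);
            return ret;
    }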
@@ -273,6 +286,7 @@ struct system_summary {
 
 struct cpu_topology {
        int physical_package_id;
+       int die_id;
        int logical_cpu_id;
        int physical_node_id;
        int logical_node_id;    /* 0-based count within the package */
@@ -283,6 +297,7 @@ struct cpu_topology {
 
 struct topo_params {
        int num_packages;
+       int num_die;
        int num_cpus;
        int num_cores;
        int max_cpu_num;
@@ -314,9 +329,8 @@ int for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg
        int retval, pkg_no, core_no, thread_no, node_no;
 
        for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) {
-               for (core_no = 0; core_no < topo.cores_per_node; ++core_no) {
-                       for (node_no = 0; node_no < topo.nodes_per_pkg;
-                            node_no++) {
+               for (node_no = 0; node_no < topo.nodes_per_pkg; node_no++) {
+                       for (core_no = 0; core_no < topo.cores_per_node; ++core_no) {
                                for (thread_no = 0; thread_no <
                                        topo.threads_per_core; ++thread_no) {
                                        struct thread_data *t;
@@ -442,6 +456,7 @@ struct msr_counter bic[] = {
        { 0x0, "CPU" },
        { 0x0, "APIC" },
        { 0x0, "X2APIC" },
+       { 0x0, "Die" },
 };
 
 #define MAX_BIC (sizeof(bic) / sizeof(struct msr_counter))
@@ -495,6 +510,7 @@ struct msr_counter bic[] = {
 #define        BIC_CPU         (1ULL << 47)
 #define        BIC_APIC        (1ULL << 48)
 #define        BIC_X2APIC      (1ULL << 49)
+#define        BIC_Die         (1ULL << 50)
 
 #define BIC_DISABLED_BY_DEFAULT        (BIC_USEC | BIC_TOD | BIC_APIC | BIC_X2APIC)
 
@@ -621,6 +637,8 @@ void print_header(char *delim)
                outp += sprintf(outp, "%sTime_Of_Day_Seconds", (printed++ ? delim : ""));
        if (DO_BIC(BIC_Package))
                outp += sprintf(outp, "%sPackage", (printed++ ? delim : ""));
+       if (DO_BIC(BIC_Die))
+               outp += sprintf(outp, "%sDie", (printed++ ? delim : ""));
        if (DO_BIC(BIC_Node))
                outp += sprintf(outp, "%sNode", (printed++ ? delim : ""));
        if (DO_BIC(BIC_Core))
@@ -667,7 +685,7 @@ void print_header(char *delim)
 
        if (DO_BIC(BIC_CPU_c1))
                outp += sprintf(outp, "%sCPU%%c1", (printed++ ? delim : ""));
-       if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates && !do_cnl_cstates)
+       if (DO_BIC(BIC_CPU_c3))
                outp += sprintf(outp, "%sCPU%%c3", (printed++ ? delim : ""));
        if (DO_BIC(BIC_CPU_c6))
                outp += sprintf(outp, "%sCPU%%c6", (printed++ ? delim : ""));
@@ -680,6 +698,14 @@ void print_header(char *delim)
        if (DO_BIC(BIC_CoreTmp))
                outp += sprintf(outp, "%sCoreTmp", (printed++ ? delim : ""));
 
+       if (do_rapl && !rapl_joules) {
+               if (DO_BIC(BIC_CorWatt) && (do_rapl & RAPL_PER_CORE_ENERGY))
+                       outp += sprintf(outp, "%sCorWatt", (printed++ ? delim : ""));
+       } else if (do_rapl && rapl_joules) {
+               if (DO_BIC(BIC_Cor_J) && (do_rapl & RAPL_PER_CORE_ENERGY))
+                       outp += sprintf(outp, "%sCor_J", (printed++ ? delim : ""));
+       }
+
        for (mp = sys.cp; mp; mp = mp->next) {
                if (mp->format == FORMAT_RAW) {
                        if (mp->width == 64)
@@ -734,7 +760,7 @@ void print_header(char *delim)
        if (do_rapl && !rapl_joules) {
                if (DO_BIC(BIC_PkgWatt))
                        outp += sprintf(outp, "%sPkgWatt", (printed++ ? delim : ""));
-               if (DO_BIC(BIC_CorWatt))
+               if (DO_BIC(BIC_CorWatt) && !(do_rapl & RAPL_PER_CORE_ENERGY))
                        outp += sprintf(outp, "%sCorWatt", (printed++ ? delim : ""));
                if (DO_BIC(BIC_GFXWatt))
                        outp += sprintf(outp, "%sGFXWatt", (printed++ ? delim : ""));
@@ -747,7 +773,7 @@ void print_header(char *delim)
        } else if (do_rapl && rapl_joules) {
                if (DO_BIC(BIC_Pkg_J))
                        outp += sprintf(outp, "%sPkg_J", (printed++ ? delim : ""));
-               if (DO_BIC(BIC_Cor_J))
+               if (DO_BIC(BIC_Cor_J) && !(do_rapl & RAPL_PER_CORE_ENERGY))
                        outp += sprintf(outp, "%sCor_J", (printed++ ? delim : ""));
                if (DO_BIC(BIC_GFX_J))
                        outp += sprintf(outp, "%sGFX_J", (printed++ ? delim : ""));
@@ -808,6 +834,7 @@ int dump_counters(struct thread_data *t, struct core_data *c,
                outp += sprintf(outp, "c6: %016llX\n", c->c6);
                outp += sprintf(outp, "c7: %016llX\n", c->c7);
                outp += sprintf(outp, "DTS: %dC\n", c->core_temp_c);
+               outp += sprintf(outp, "Joules: %0X\n", c->core_energy);
 
                for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
                        outp += sprintf(outp, "cADDED [%d] msr0x%x: %08llX\n",
@@ -904,6 +931,8 @@ int format_counters(struct thread_data *t, struct core_data *c,
        if (t == &average.threads) {
                if (DO_BIC(BIC_Package))
                        outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
+               if (DO_BIC(BIC_Die))
+                       outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
                if (DO_BIC(BIC_Node))
                        outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
                if (DO_BIC(BIC_Core))
@@ -921,6 +950,12 @@ int format_counters(struct thread_data *t, struct core_data *c,
                        else
                                outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
                }
+               if (DO_BIC(BIC_Die)) {
+                       if (c)
+                               outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), cpus[t->cpu_id].die_id);
+                       else
+                               outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
+               }
                if (DO_BIC(BIC_Node)) {
                        if (t)
                                outp += sprintf(outp, "%s%d",
@@ -1003,7 +1038,7 @@ int format_counters(struct thread_data *t, struct core_data *c,
        if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
                goto done;
 
-       if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates && !do_cnl_cstates)
+       if (DO_BIC(BIC_CPU_c3))
                outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c3/tsc);
        if (DO_BIC(BIC_CPU_c6))
                outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c6/tsc);
@@ -1033,6 +1068,20 @@ int format_counters(struct thread_data *t, struct core_data *c,
                }
        }
 
+       /*
+        * If the measurement interval exceeds the minimum RAPL Joule counter
+        * range, indicate that the results are suspect by printing "**" in the
+        * fraction place.
+        */
+       if (interval_float < rapl_joule_counter_range)
+               fmt8 = "%s%.2f";
+       else
+               fmt8 = "%6.0f**";
+
+       if (DO_BIC(BIC_CorWatt) && (do_rapl & RAPL_PER_CORE_ENERGY))
+               outp += sprintf(outp, fmt8, (printed++ ? delim : ""), c->core_energy * rapl_energy_units / interval_float);
+       if (DO_BIC(BIC_Cor_J) && (do_rapl & RAPL_PER_CORE_ENERGY))
+               outp += sprintf(outp, fmt8, (printed++ ? delim : ""), c->core_energy * rapl_energy_units);
+
        /* print per-package data only for 1st core in package */
        if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
                goto done;
@@ -1085,18 +1134,9 @@ int format_counters(struct thread_data *t, struct core_data *c,
        if (DO_BIC(BIC_SYS_LPI))
                outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->sys_lpi / 1000000.0 / interval_float);
 
-       /*
-        * If measurement interval exceeds minimum RAPL Joule Counter range,
-        * indicate that results are suspect by printing "**" in fraction place.
-        */
-       if (interval_float < rapl_joule_counter_range)
-               fmt8 = "%s%.2f";
-       else
-               fmt8 = "%6.0f**";
-
        if (DO_BIC(BIC_PkgWatt))
                outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_pkg * rapl_energy_units / interval_float);
-       if (DO_BIC(BIC_CorWatt))
+       if (DO_BIC(BIC_CorWatt) && !(do_rapl & RAPL_PER_CORE_ENERGY))
                outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_cores * rapl_energy_units / interval_float);
        if (DO_BIC(BIC_GFXWatt))
                outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_gfx * rapl_energy_units / interval_float);
@@ -1104,7 +1144,7 @@ int format_counters(struct thread_data *t, struct core_data *c,
                outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_dram * rapl_dram_energy_units / interval_float);
        if (DO_BIC(BIC_Pkg_J))
                outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_pkg * rapl_energy_units);
-       if (DO_BIC(BIC_Cor_J))
+       if (DO_BIC(BIC_Cor_J) && !(do_rapl & RAPL_PER_CORE_ENERGY))
                outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_cores * rapl_energy_units);
        if (DO_BIC(BIC_GFX_J))
                outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_gfx * rapl_energy_units);
@@ -1249,6 +1289,8 @@ delta_core(struct core_data *new, struct core_data *old)
        old->core_temp_c = new->core_temp_c;
        old->mc6_us = new->mc6_us - old->mc6_us;
 
+       DELTA_WRAP32(new->core_energy, old->core_energy);
+
        for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
                if (mp->format == FORMAT_RAW)
                        old->counter[i] = new->counter[i];
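
DELTA_WRAP32() is defined elsewhere in turbostat.c (not shown in this hunk); it computes the difference between two samples of a 32-bit counter while tolerating a single wraparound, which is why it suits the new 32-bit MSR_CORE_ENERGY_STAT reading. A self-contained sketch of the idea, not the tool's exact macro:

    #include <stdint.h>
    #include <stdio.h>

    /* Delta of a 32-bit free-running counter that may have wrapped once
     * between the two samples. */
    static uint64_t delta_wrap32(uint32_t new, uint32_t old)
    {
        if (new >= old)
            return (uint64_t)new - old;
        return 0x100000000ULL + new - old;  /* counter wrapped */
    }

    int main(void)
    {
        printf("%llu\n", (unsigned long long)delta_wrap32(10, 0xFFFFFFF0));
        /* prints 26: the counter advanced 26 units across the 32-bit limit */
        return 0;
    }
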
@@ -1391,6 +1433,7 @@ void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data
        c->c7 = 0;
        c->mc6_us = 0;
        c->core_temp_c = 0;
+       c->core_energy = 0;
 
        p->pkg_wtd_core_c0 = 0;
        p->pkg_any_core_c0 = 0;
@@ -1473,6 +1516,8 @@ int sum_counters(struct thread_data *t, struct core_data *c,
 
        average.cores.core_temp_c = MAX(average.cores.core_temp_c, c->core_temp_c);
 
+       average.cores.core_energy += c->core_energy;
+
        for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
                if (mp->format == FORMAT_RAW)
                        continue;
@@ -1818,7 +1863,7 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
        if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
                goto done;
 
-       if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates && !do_cnl_cstates) {
+       if (DO_BIC(BIC_CPU_c3)) {
                if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3))
                        return -6;
        }
@@ -1845,6 +1890,12 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
                c->core_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F);
        }
 
+       if (do_rapl & RAPL_AMD_F17H) {
+               if (get_msr(cpu, MSR_CORE_ENERGY_STAT, &msr))
+                       return -14;
+               c->core_energy = msr & 0xFFFFFFFF;
+       }
+
        for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
                if (get_mp(cpu, mp, &c->counter[i]))
                        return -10;
@@ -1934,6 +1985,11 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
                        return -16;
                p->rapl_dram_perf_status = msr & 0xFFFFFFFF;
        }
+       if (do_rapl & RAPL_AMD_F17H) {
+               if (get_msr(cpu, MSR_PKG_ENERGY_STAT, &msr))
+                       return -13;
+               p->energy_pkg = msr & 0xFFFFFFFF;
+       }
        if (DO_BIC(BIC_PkgTmp)) {
                if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr))
                        return -17;
@@ -2456,6 +2512,8 @@ void free_all_buffers(void)
 
 /*
  * Parse a file containing a single int.
+ * Return 0 if the file cannot be opened.
+ * Exit if the file can be opened but cannot be parsed.
  */
 int parse_int_file(const char *fmt, ...)
 {
@@ -2467,7 +2525,9 @@ int parse_int_file(const char *fmt, ...)
        va_start(args, fmt);
        vsnprintf(path, sizeof(path), fmt, args);
        va_end(args);
-       filep = fopen_or_die(path, "r");
+       filep = fopen(path, "r");
+       if (!filep)
+               return 0;
        if (fscanf(filep, "%d", &value) != 1)
                err(1, "%s: failed to parse number from file", path);
        fclose(filep);
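
Softening fopen_or_die() to a plain fopen() matters for the new die topology code below: on kernels whose sysfs does not export topology/die_id, get_die_id() now quietly reports die 0 instead of aborting the whole run. A user-space sketch of the resulting behavior (the path is the real sysfs attribute; the helper name is made up):

    #include <stdio.h>

    static int parse_int_file_sketch(const char *path)
    {
        FILE *fp = fopen(path, "r");
        int value = 0;

        if (!fp)
            return 0;       /* attribute absent: assume a single die */
        if (fscanf(fp, "%d", &value) != 1) {
            fclose(fp);
            return -1;      /* the real tool exits via err(1, ...) here */
        }
        fclose(fp);
        return value;
    }

    int main(void)
    {
        printf("die_id: %d\n",
               parse_int_file_sketch("/sys/devices/system/cpu/cpu0/topology/die_id"));
        return 0;
    }
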
@@ -2488,6 +2548,11 @@ int get_physical_package_id(int cpu)
        return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu);
 }
 
+int get_die_id(int cpu)
+{
+       return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/die_id", cpu);
+}
+
 int get_core_id(int cpu)
 {
        return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_id", cpu);
@@ -2578,7 +2643,8 @@ int get_thread_siblings(struct cpu_topology *thiscpu)
        filep = fopen_or_die(path, "r");
        do {
                offset -= BITMASK_SIZE;
-               fscanf(filep, "%lx%c", &map, &character);
+               if (fscanf(filep, "%lx%c", &map, &character) != 2)
+                       err(1, "%s: failed to parse file", path);
                for (shift = 0; shift < BITMASK_SIZE; shift++) {
                        if ((map >> shift) & 0x1) {
                                so = shift + offset;
@@ -2855,8 +2921,11 @@ int snapshot_cpu_lpi_us(void)
        fp = fopen_or_die("/sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us", "r");
 
        retval = fscanf(fp, "%lld", &cpuidle_cur_cpu_lpi_us);
-       if (retval != 1)
-               err(1, "CPU LPI");
+       if (retval != 1) {
+               fprintf(stderr, "Disabling Low Power Idle CPU output\n");
+               BIC_NOT_PRESENT(BIC_CPU_LPI);
+               return -1;
+       }
 
        fclose(fp);
 
@@ -2878,9 +2947,11 @@ int snapshot_sys_lpi_us(void)
        fp = fopen_or_die("/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us", "r");
 
        retval = fscanf(fp, "%lld", &cpuidle_cur_sys_lpi_us);
-       if (retval != 1)
-               err(1, "SYS LPI");
-
+       if (retval != 1) {
+               fprintf(stderr, "Disabling Low Power Idle System output\n");
+               BIC_NOT_PRESENT(BIC_SYS_LPI);
+               return -1;
+       }
        fclose(fp);
 
        return 0;
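
Both snapshot functions now degrade gracefully: a failed read clears the column's bit, so only that output disappears rather than turbostat dying on machines without the cpuidle residency files. In the real tool BIC_NOT_PRESENT() clears the counter's bit in a presence bitmask; a compressed sketch of the pattern, with made-up bit names:

    #include <stdio.h>

    static unsigned long long bic_present = 0x3;   /* CPU_LPI | SYS_LPI */
    #define BIC_CPU_LPI          (1ULL << 0)
    #define BIC_NOT_PRESENT(bit) (bic_present &= ~(bit))

    int main(void)
    {
        /* simulate the fscanf() failure path */
        fprintf(stderr, "Disabling Low Power Idle CPU output\n");
        BIC_NOT_PRESENT(BIC_CPU_LPI);

        /* the column is printed only while its bit stays set */
        printf("CPU LPI column present: %s\n",
               (bic_present & BIC_CPU_LPI) ? "yes" : "no");
        return 0;
    }
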
@@ -3410,14 +3481,14 @@ dump_sysfs_cstate_config(void)
                input = fopen(path, "r");
                if (input == NULL)
                        continue;
-               fgets(name_buf, sizeof(name_buf), input);
+               if (!fgets(name_buf, sizeof(name_buf), input))
+                       err(1, "%s: failed to read file", path);
 
                 /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */
                sp = strchr(name_buf, '-');
                if (!sp)
                        sp = strchrnul(name_buf, '\n');
                *sp = '\0';
-
                fclose(input);
 
                sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/desc",
@@ -3425,7 +3496,8 @@ dump_sysfs_cstate_config(void)
                input = fopen(path, "r");
                if (input == NULL)
                        continue;
-               fgets(desc, sizeof(desc), input);
+               if (!fgets(desc, sizeof(desc), input))
+                       err(1, "%s: failed to read file", path);
 
                fprintf(outf, "cpu%d: %s: %s", base_cpu, name_buf, desc);
                fclose(input);
@@ -3444,20 +3516,22 @@ dump_sysfs_pstate_config(void)
                        base_cpu);
        input = fopen(path, "r");
        if (input == NULL) {
-               fprintf(stderr, "NSFOD %s\n", path);
+               fprintf(outf, "NSFOD %s\n", path);
                return;
        }
-       fgets(driver_buf, sizeof(driver_buf), input);
+       if (!fgets(driver_buf, sizeof(driver_buf), input))
+               err(1, "%s: failed to read file", path);
        fclose(input);
 
        sprintf(path, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_governor",
                        base_cpu);
        input = fopen(path, "r");
        if (input == NULL) {
-               fprintf(stderr, "NSFOD %s\n", path);
+               fprintf(outf, "NSFOD %s\n", path);
                return;
        }
-       fgets(governor_buf, sizeof(governor_buf), input);
+       if (!fgets(governor_buf, sizeof(governor_buf), input))
+               err(1, "%s: failed to read file", path);
        fclose(input);
 
        fprintf(outf, "cpu%d: cpufreq driver: %s", base_cpu, driver_buf);
@@ -3466,7 +3540,8 @@ dump_sysfs_pstate_config(void)
        sprintf(path, "/sys/devices/system/cpu/cpufreq/boost");
        input = fopen(path, "r");
        if (input != NULL) {
-               fscanf(input, "%d", &turbo);
+               if (fscanf(input, "%d", &turbo) != 1)
+                       err(1, "%s: failed to parse number from file", path);
                fprintf(outf, "cpufreq boost: %d\n", turbo);
                fclose(input);
        }
@@ -3474,7 +3549,8 @@ dump_sysfs_pstate_config(void)
        sprintf(path, "/sys/devices/system/cpu/intel_pstate/no_turbo");
        input = fopen(path, "r");
        if (input != NULL) {
-               fscanf(input, "%d", &turbo);
+               if (fscanf(input, "%d", &turbo) != 1)
+                       err(1, "%s: failed to parse number from file", path);
                fprintf(outf, "cpufreq intel_pstate no_turbo: %d\n", turbo);
                fclose(input);
        }
@@ -3718,7 +3794,7 @@ int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data
 #define        RAPL_POWER_GRANULARITY  0x7FFF  /* 15 bit power granularity */
 #define        RAPL_TIME_GRANULARITY   0x3F /* 6 bit time granularity */
 
-double get_tdp(unsigned int model)
+double get_tdp_intel(unsigned int model)
 {
        unsigned long long msr;
 
@@ -3735,6 +3811,16 @@ double get_tdp(unsigned int model)
        }
 }
 
+double get_tdp_amd(unsigned int family)
+{
+       switch (family) {
+       case 0x17:
+       default:
+               /* This is the max stock TDP of HEDT/Server Fam17h chips */
+               return 250.0;
+       }
+}
+
 /*
  * rapl_dram_energy_units_probe()
  * Energy units are either hard-coded, or come from RAPL Energy Unit MSR.
@@ -3754,21 +3840,12 @@ rapl_dram_energy_units_probe(int  model, double rapl_energy_units)
        }
 }
 
-
-/*
- * rapl_probe()
- *
- * sets do_rapl, rapl_power_units, rapl_energy_units, rapl_time_units
- */
-void rapl_probe(unsigned int family, unsigned int model)
+void rapl_probe_intel(unsigned int family, unsigned int model)
 {
        unsigned long long msr;
        unsigned int time_unit;
        double tdp;
 
-       if (!genuine_intel)
-               return;
-
        if (family != 6)
                return;
 
@@ -3892,13 +3969,69 @@ void rapl_probe(unsigned int family, unsigned int model)
 
        rapl_time_units = 1.0 / (1 << (time_unit));
 
-       tdp = get_tdp(model);
+       tdp = get_tdp_intel(model);
 
        rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp;
        if (!quiet)
                fprintf(outf, "RAPL: %.0f sec. Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp);
+}
 
-       return;
+void rapl_probe_amd(unsigned int family, unsigned int model)
+{
+       unsigned long long msr;
+       unsigned int eax, ebx, ecx, edx;
+       unsigned int has_rapl = 0;
+       double tdp;
+
+       if (max_extended_level >= 0x80000007) {
+               __cpuid(0x80000007, eax, ebx, ecx, edx);
+               /* RAPL (Fam 17h) */
+               has_rapl = edx & (1 << 14);
+       }
+
+       if (!has_rapl)
+               return;
+
+       switch (family) {
+       case 0x17: /* Zen, Zen+ */
+               do_rapl = RAPL_AMD_F17H | RAPL_PER_CORE_ENERGY;
+               if (rapl_joules) {
+                       BIC_PRESENT(BIC_Pkg_J);
+                       BIC_PRESENT(BIC_Cor_J);
+               } else {
+                       BIC_PRESENT(BIC_PkgWatt);
+                       BIC_PRESENT(BIC_CorWatt);
+               }
+               break;
+       default:
+               return;
+       }
+
+       if (get_msr(base_cpu, MSR_RAPL_PWR_UNIT, &msr))
+               return;
+
+       rapl_time_units = ldexp(1.0, -(msr >> 16 & 0xf));
+       rapl_energy_units = ldexp(1.0, -(msr >> 8 & 0x1f));
+       rapl_power_units = ldexp(1.0, -(msr & 0xf));
+
+       tdp = get_tdp_amd(model);
+
+       rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp;
+       if (!quiet)
+               fprintf(outf, "RAPL: %.0f sec. Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp);
+}
+
+/*
+ * rapl_probe()
+ *
+ * sets do_rapl, rapl_power_units, rapl_energy_units, rapl_time_units
+ */
+void rapl_probe(unsigned int family, unsigned int model)
+{
+       if (genuine_intel)
+               rapl_probe_intel(family, model);
+       if (authentic_amd)
+               rapl_probe_amd(family, model);
 }
 
 void perf_limit_reasons_probe(unsigned int family, unsigned int model)
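
The three ldexp() calls above decode the AMD unit register: each field is a negative power-of-two exponent, with the power unit in bits 3:0, the energy unit in bits 12:8, and the time unit in bits 19:16 of MSR_RAPL_PWR_UNIT. A standalone sketch of the same decoding (the MSR value is a made-up sample, not read from hardware; link with -lm):

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned long long msr = 0x000a1003ULL;   /* sample raw value */

        double power_units  = ldexp(1.0, -(int)(msr & 0xf));        /* bits  3:0 */
        double energy_units = ldexp(1.0, -(int)(msr >> 8 & 0x1f));  /* bits 12:8 */
        double time_units   = ldexp(1.0, -(int)(msr >> 16 & 0xf));  /* bits 19:16 */

        printf("%g W, %g J, %g s per LSB\n",
               power_units, energy_units, time_units);
        return 0;
    }
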
@@ -4003,6 +4136,7 @@ void print_power_limit_msr(int cpu, unsigned long long msr, char *label)
 int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
 {
        unsigned long long msr;
+       const char *msr_name;
        int cpu;
 
        if (!do_rapl)
@@ -4018,10 +4152,17 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
                return -1;
        }
 
-       if (get_msr(cpu, MSR_RAPL_POWER_UNIT, &msr))
-               return -1;
+       if (do_rapl & RAPL_AMD_F17H) {
+               msr_name = "MSR_RAPL_PWR_UNIT";
+               if (get_msr(cpu, MSR_RAPL_PWR_UNIT, &msr))
+                       return -1;
+       } else {
+               msr_name = "MSR_RAPL_POWER_UNIT";
+               if (get_msr(cpu, MSR_RAPL_POWER_UNIT, &msr))
+                       return -1;
+       }
 
-       fprintf(outf, "cpu%d: MSR_RAPL_POWER_UNIT: 0x%08llx (%f Watts, %f Joules, %f sec.)\n", cpu, msr,
+       fprintf(outf, "cpu%d: %s: 0x%08llx (%f Watts, %f Joules, %f sec.)\n", cpu, msr_name, msr,
                rapl_power_units, rapl_energy_units, rapl_time_units);
 
        if (do_rapl & RAPL_PKG_POWER_INFO) {
@@ -4451,6 +4592,9 @@ unsigned int intel_model_duplicates(unsigned int model)
        case INTEL_FAM6_KABYLAKE_MOBILE:
        case INTEL_FAM6_KABYLAKE_DESKTOP:
                return INTEL_FAM6_SKYLAKE_MOBILE;
+
+       case INTEL_FAM6_ICELAKE_MOBILE:
+               return INTEL_FAM6_CANNONLAKE_MOBILE;
        }
        return model;
 }
@@ -4702,7 +4846,9 @@ void process_cpuid()
        }
        do_slm_cstates = is_slm(family, model);
        do_knl_cstates  = is_knl(family, model);
-       do_cnl_cstates = is_cnl(family, model);
+
+       if (do_slm_cstates || do_knl_cstates || is_cnl(family, model))
+               BIC_NOT_PRESENT(BIC_CPU_c3);
 
        if (!quiet)
                decode_misc_pwr_mgmt_msr();
@@ -4769,6 +4915,7 @@ void topology_probe()
        int i;
        int max_core_id = 0;
        int max_package_id = 0;
+       int max_die_id = 0;
        int max_siblings = 0;
 
        /* Initialize num_cpus, max_cpu_num */
@@ -4835,6 +4982,11 @@ void topology_probe()
                if (cpus[i].physical_package_id > max_package_id)
                        max_package_id = cpus[i].physical_package_id;
 
+               /* get die information */
+               cpus[i].die_id = get_die_id(i);
+               if (cpus[i].die_id > max_die_id)
+                       max_die_id = cpus[i].die_id;
+
                /* get numa node information */
                cpus[i].physical_node_id = get_physical_node_id(&cpus[i]);
                if (cpus[i].physical_node_id > topo.max_node_num)
@@ -4860,6 +5012,13 @@ void topology_probe()
        if (!summary_only && topo.cores_per_node > 1)
                BIC_PRESENT(BIC_Core);
 
+       topo.num_die = max_die_id + 1;
+       if (debug > 1)
+               fprintf(outf, "max_die_id %d, sizing for %d die\n",
+                               max_die_id, topo.num_die);
+       if (!summary_only && topo.num_die > 1)
+               BIC_PRESENT(BIC_Die);
+
        topo.num_packages = max_package_id + 1;
        if (debug > 1)
                fprintf(outf, "max_package_id %d, sizing for %d packages\n",
@@ -4884,8 +5043,8 @@ void topology_probe()
                if (cpu_is_not_present(i))
                        continue;
                fprintf(outf,
-                       "cpu %d pkg %d node %d lnode %d core %d thread %d\n",
-                       i, cpus[i].physical_package_id,
+                       "cpu %d pkg %d die %d node %d lnode %d core %d thread %d\n",
+                       i, cpus[i].physical_package_id, cpus[i].die_id,
                        cpus[i].physical_node_id,
                        cpus[i].logical_node_id,
                        cpus[i].physical_core_id,
@@ -5122,7 +5281,7 @@ int get_and_dump_counters(void)
 }
 
 void print_version() {
-       fprintf(outf, "turbostat version 18.07.27"
+       fprintf(outf, "turbostat version 19.03.20"
                " - Len Brown <lenb@kernel.org>\n");
 }
 
@@ -5319,7 +5478,8 @@ void probe_sysfs(void)
                input = fopen(path, "r");
                if (input == NULL)
                        continue;
-               fgets(name_buf, sizeof(name_buf), input);
+               if (!fgets(name_buf, sizeof(name_buf), input))
+                       err(1, "%s: failed to read file", path);
 
                 /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */
                sp = strchr(name_buf, '-');
@@ -5346,7 +5506,8 @@ void probe_sysfs(void)
                input = fopen(path, "r");
                if (input == NULL)
                        continue;
-               fgets(name_buf, sizeof(name_buf), input);
+               if (!fgets(name_buf, sizeof(name_buf), input))
+                       err(1, "%s: failed to read file", path);
                 /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */
                sp = strchr(name_buf, '-');
                if (!sp)
index b579f962451d6464035c6649ac714998c05a225f..85ffdcfa596b5011b93abf3c65e90cd33cceb61f 100644 (file)
@@ -146,6 +146,7 @@ static int dimm_fail_cmd_code[ARRAY_SIZE(handle)];
 struct nfit_test_sec {
        u8 state;
        u8 ext_state;
+       u8 old_state;
        u8 passphrase[32];
        u8 master_passphrase[32];
        u64 overwrite_end_time;
@@ -225,6 +226,8 @@ static struct workqueue_struct *nfit_wq;
 
 static struct gen_pool *nfit_pool;
 
+static const char zero_key[NVDIMM_PASSPHRASE_LEN];
+
 static struct nfit_test *to_nfit_test(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
@@ -1059,8 +1062,7 @@ static int nd_intel_test_cmd_secure_erase(struct nfit_test *t,
        struct device *dev = &t->pdev.dev;
        struct nfit_test_sec *sec = &dimm_sec_info[dimm];
 
-       if (!(sec->state & ND_INTEL_SEC_STATE_ENABLED) ||
-                       (sec->state & ND_INTEL_SEC_STATE_FROZEN)) {
+       if (sec->state & ND_INTEL_SEC_STATE_FROZEN) {
                nd_cmd->status = ND_INTEL_STATUS_INVALID_STATE;
                dev_dbg(dev, "secure erase: wrong security state\n");
        } else if (memcmp(nd_cmd->passphrase, sec->passphrase,
@@ -1068,6 +1070,12 @@ static int nd_intel_test_cmd_secure_erase(struct nfit_test *t,
                nd_cmd->status = ND_INTEL_STATUS_INVALID_PASS;
                dev_dbg(dev, "secure erase: wrong passphrase\n");
        } else {
+               if (!(sec->state & ND_INTEL_SEC_STATE_ENABLED)
+                               && (memcmp(nd_cmd->passphrase, zero_key,
+                                       ND_INTEL_PASSPHRASE_SIZE) != 0)) {
+                       dev_dbg(dev, "invalid zero key\n");
+                       return 0;
+               }
                memset(sec->passphrase, 0, ND_INTEL_PASSPHRASE_SIZE);
                memset(sec->master_passphrase, 0, ND_INTEL_PASSPHRASE_SIZE);
                sec->state = 0;
@@ -1093,7 +1101,7 @@ static int nd_intel_test_cmd_overwrite(struct nfit_test *t,
                return 0;
        }
 
-       memset(sec->passphrase, 0, ND_INTEL_PASSPHRASE_SIZE);
+       sec->old_state = sec->state;
        sec->state = ND_INTEL_SEC_STATE_OVERWRITE;
        dev_dbg(dev, "overwrite progressing.\n");
        sec->overwrite_end_time = get_jiffies_64() + 5 * HZ;
@@ -1115,7 +1123,8 @@ static int nd_intel_test_cmd_query_overwrite(struct nfit_test *t,
 
        if (time_is_before_jiffies64(sec->overwrite_end_time)) {
                sec->overwrite_end_time = 0;
-               sec->state = 0;
+               sec->state = sec->old_state;
+               sec->old_state = 0;
                sec->ext_state = ND_INTEL_SEC_ESTATE_ENABLED;
                dev_dbg(dev, "overwrite is complete\n");
        } else
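
Taken together, the nfit_test changes make the mock DIMM accept a secure erase while security is disabled only if the caller supplies the all-zero key, and make overwrite restore the prior security state instead of silently dropping the passphrase. A condensed user-space sketch of the new erase check (sizes and state bits are made up):

    #include <stdio.h>
    #include <string.h>

    #define PASS_SIZE     32
    #define STATE_ENABLED 0x1
    #define STATE_FROZEN  0x2

    static const unsigned char zero_key[PASS_SIZE];   /* implicitly all zeroes */

    /* Returns 0 if the erase may proceed, -1 on an invalid request. */
    static int secure_erase(unsigned state, const unsigned char *pass)
    {
        if (state & STATE_FROZEN)
            return -1;                            /* wrong security state */
        if (!(state & STATE_ENABLED) &&
            memcmp(pass, zero_key, PASS_SIZE))
            return -1;                            /* invalid zero key */
        return 0;
    }

    int main(void)
    {
        unsigned char pass[PASS_SIZE] = { 0 };

        printf("disabled + zero key:     %d\n", secure_erase(0, pass));
        pass[0] = 0xab;
        printf("disabled + non-zero key: %d\n", secure_erase(0, pass));
        return 0;
    }
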
index 77b73b892136be4f96fa772ad9e24c9d8a77544c..078283d073b067736da1f2c4948a2c3b86f8a584 100644 (file)
@@ -209,7 +209,7 @@ ifeq ($(DWARF2BTF),y)
 endif
 
 PROG_TESTS_H := $(OUTPUT)/prog_tests/tests.h
-$(OUTPUT)/test_progs: $(PROG_TESTS_H)
+test_progs.c: $(PROG_TESTS_H)
 $(OUTPUT)/test_progs: CFLAGS += $(TEST_PROGS_CFLAGS)
 $(OUTPUT)/test_progs: prog_tests/*.c
 
@@ -232,7 +232,7 @@ $(PROG_TESTS_H): $(PROG_TESTS_DIR) $(PROG_TESTS_FILES)
                 ) > $(PROG_TESTS_H))
 
 VERIFIER_TESTS_H := $(OUTPUT)/verifier/tests.h
-$(OUTPUT)/test_verifier: $(VERIFIER_TESTS_H)
+test_verifier.c: $(VERIFIER_TESTS_H)
 $(OUTPUT)/test_verifier: CFLAGS += $(TEST_VERIFIER_CFLAGS)
 
 VERIFIER_TESTS_DIR = $(OUTPUT)/verifier
index 97d140961438fa5171175fa463a04301105771c6..e85d62cb53d0e9c37409a17235906d61dcb99a45 100644 (file)
@@ -9,14 +9,14 @@
 #define SEC(NAME) __attribute__((section(NAME), used))
 
 /* helper functions called from eBPF programs written in C */
-static void *(*bpf_map_lookup_elem)(void *map, void *key) =
+static void *(*bpf_map_lookup_elem)(void *map, const void *key) =
        (void *) BPF_FUNC_map_lookup_elem;
-static int (*bpf_map_update_elem)(void *map, void *key, void *value,
+static int (*bpf_map_update_elem)(void *map, const void *key, const void *value,
                                  unsigned long long flags) =
        (void *) BPF_FUNC_map_update_elem;
-static int (*bpf_map_delete_elem)(void *map, void *key) =
+static int (*bpf_map_delete_elem)(void *map, const void *key) =
        (void *) BPF_FUNC_map_delete_elem;
-static int (*bpf_map_push_elem)(void *map, void *value,
+static int (*bpf_map_push_elem)(void *map, const void *value,
                                unsigned long long flags) =
        (void *) BPF_FUNC_map_push_elem;
 static int (*bpf_map_pop_elem)(void *map, void *value) =
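
The const qualifiers are a usability fix for test programs: lookup keys and pushed values are read-only to these helpers, so callers may now pass pointers to const (e.g. .rodata) objects without a cast. The same effect in a plain-C analogy (the function here is illustrative, not a BPF helper):

    #include <stdio.h>

    /* With a non-const key parameter, the call below would need a cast;
     * with const it compiles cleanly. */
    static int lookup(const void *map, const void *key)
    {
        (void)map; (void)key;
        return 0;
    }

    int main(void)
    {
        static const unsigned int key = 42;   /* read-only key, as in .rodata */
        int dummy_map = 0;

        return lookup(&dummy_map, &key);      /* no cast needed */
    }
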
index a42f4fc4dc11f6f1a4319847599ff9fb2728b32b..8c976476f6fdc24e09999b8028df175a74ab1dc7 100644 (file)
@@ -25,3 +25,11 @@ CONFIG_XDP_SOCKETS=y
 CONFIG_FTRACE_SYSCALLS=y
 CONFIG_IPV6_TUNNEL=y
 CONFIG_IPV6_GRE=y
+CONFIG_NET_FOU=m
+CONFIG_NET_FOU_IP_TUNNELS=y
+CONFIG_IPV6_FOU=m
+CONFIG_IPV6_FOU_TUNNEL=m
+CONFIG_MPLS=y
+CONFIG_NET_MPLS_GSO=m
+CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
index 77cafa66d0487840a81b02871d85f8d6b7082389..7136ab9ffa73a3ebbf6e7ac86199777e94995a4d 100644 (file)
@@ -52,7 +52,7 @@ static void detach_program(void)
        sprintf(command, "rm -r %s", cfg_pin_path);
        ret = system(command);
        if (ret)
-               error(1, errno, command);
+               error(1, errno, "%s", command);
 }
 
 static void parse_opts(int argc, char **argv)
index a64f7a02139c258c33704fcd6c575d9c341235c0..cb827383db4d5614799ca83b998a094b25415191 100644 (file)
@@ -73,7 +73,7 @@ void test_bpf_obj_id(void)
                          info_len != sizeof(struct bpf_map_info) ||
                          strcmp((char *)map_infos[i].name, expected_map_name),
                          "get-map-info(fd)",
-                         "err %d errno %d type %d(%d) info_len %u(%Zu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
+                         "err %d errno %d type %d(%d) info_len %u(%zu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
                          err, errno,
                          map_infos[i].type, BPF_MAP_TYPE_ARRAY,
                          info_len, sizeof(struct bpf_map_info),
@@ -117,7 +117,7 @@ void test_bpf_obj_id(void)
                          *(int *)(long)prog_infos[i].map_ids != map_infos[i].id ||
                          strcmp((char *)prog_infos[i].name, expected_prog_name),
                          "get-prog-info(fd)",
-                         "err %d errno %d i %d type %d(%d) info_len %u(%Zu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n",
+                         "err %d errno %d i %d type %d(%d) info_len %u(%zu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n",
                          err, errno, i,
                          prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER,
                          info_len, sizeof(struct bpf_prog_info),
@@ -185,7 +185,7 @@ void test_bpf_obj_id(void)
                      memcmp(&prog_info, &prog_infos[i], info_len) ||
                      *(int *)(long)prog_info.map_ids != saved_map_id,
                      "get-prog-info(next_id->fd)",
-                     "err %d errno %d info_len %u(%Zu) memcmp %d map_id %u(%u)\n",
+                     "err %d errno %d info_len %u(%zu) memcmp %d map_id %u(%u)\n",
                      err, errno, info_len, sizeof(struct bpf_prog_info),
                      memcmp(&prog_info, &prog_infos[i], info_len),
                      *(int *)(long)prog_info.map_ids, saved_map_id);
@@ -231,7 +231,7 @@ void test_bpf_obj_id(void)
                      memcmp(&map_info, &map_infos[i], info_len) ||
                      array_value != array_magic_value,
                      "check get-map-info(next_id->fd)",
-                     "err %d errno %d info_len %u(%Zu) memcmp %d array_value %llu(%llu)\n",
+                     "err %d errno %d info_len %u(%zu) memcmp %d array_value %llu(%llu)\n",
                      err, errno, info_len, sizeof(struct bpf_map_info),
                      memcmp(&map_info, &map_infos[i], info_len),
                      array_value, array_magic_value);
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c
new file mode 100644 (file)
index 0000000..23b159d
--- /dev/null
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Facebook
+#include <test_progs.h>
+static int libbpf_debug_print(enum libbpf_print_level level,
+                             const char *format, va_list args)
+{
+       if (level != LIBBPF_DEBUG)
+               return 0;
+
+       if (!strstr(format, "verifier log"))
+               return 0;
+       return vfprintf(stderr, "%s", args);
+}
+
+static int check_load(const char *file)
+{
+       struct bpf_prog_load_attr attr;
+       struct bpf_object *obj;
+       int err, prog_fd;
+
+       memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
+       attr.file = file;
+       attr.prog_type = BPF_PROG_TYPE_SCHED_CLS;
+       attr.log_level = 4;
+       err = bpf_prog_load_xattr(&attr, &obj, &prog_fd);
+       bpf_object__close(obj);
+       if (err)
+               error_cnt++;
+       return err;
+}
+
+void test_bpf_verif_scale(void)
+{
+       const char *file1 = "./test_verif_scale1.o";
+       const char *file2 = "./test_verif_scale2.o";
+       const char *file3 = "./test_verif_scale3.o";
+       int err;
+
+       if (verifier_stats)
+               libbpf_set_print(libbpf_debug_print);
+
+       err = check_load(file1);
+       err |= check_load(file2);
+       err |= check_load(file3);
+       if (!err)
+               printf("test_verif_scale:OK\n");
+       else
+               printf("test_verif_scale:FAIL\n");
+}
index bcbd928c96aba4ab3a2a13984b1987f1497a412d..fc818bc1d7294454093a949b3e5f394ca9879982 100644 (file)
@@ -39,6 +39,58 @@ static struct bpf_flow_keys pkt_v6_flow_keys = {
        .n_proto = __bpf_constant_htons(ETH_P_IPV6),
 };
 
+#define VLAN_HLEN      4
+
+static struct {
+       struct ethhdr eth;
+       __u16 vlan_tci;
+       __u16 vlan_proto;
+       struct iphdr iph;
+       struct tcphdr tcp;
+} __packed pkt_vlan_v4 = {
+       .eth.h_proto = __bpf_constant_htons(ETH_P_8021Q),
+       .vlan_proto = __bpf_constant_htons(ETH_P_IP),
+       .iph.ihl = 5,
+       .iph.protocol = IPPROTO_TCP,
+       .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
+       .tcp.urg_ptr = 123,
+       .tcp.doff = 5,
+};
+
+static struct bpf_flow_keys pkt_vlan_v4_flow_keys = {
+       .nhoff = VLAN_HLEN,
+       .thoff = VLAN_HLEN + sizeof(struct iphdr),
+       .addr_proto = ETH_P_IP,
+       .ip_proto = IPPROTO_TCP,
+       .n_proto = __bpf_constant_htons(ETH_P_IP),
+};
+
+static struct {
+       struct ethhdr eth;
+       __u16 vlan_tci;
+       __u16 vlan_proto;
+       __u16 vlan_tci2;
+       __u16 vlan_proto2;
+       struct ipv6hdr iph;
+       struct tcphdr tcp;
+} __packed pkt_vlan_v6 = {
+       .eth.h_proto = __bpf_constant_htons(ETH_P_8021AD),
+       .vlan_proto = __bpf_constant_htons(ETH_P_8021Q),
+       .vlan_proto2 = __bpf_constant_htons(ETH_P_IPV6),
+       .iph.nexthdr = IPPROTO_TCP,
+       .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
+       .tcp.urg_ptr = 123,
+       .tcp.doff = 5,
+};
+
+static struct bpf_flow_keys pkt_vlan_v6_flow_keys = {
+       .nhoff = VLAN_HLEN * 2,
+       .thoff = VLAN_HLEN * 2 + sizeof(struct ipv6hdr),
+       .addr_proto = ETH_P_IPV6,
+       .ip_proto = IPPROTO_TCP,
+       .n_proto = __bpf_constant_htons(ETH_P_IPV6),
+};
+
 void test_flow_dissector(void)
 {
        struct bpf_flow_keys flow_keys;
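
The expected offsets in the two flow-keys structs are plain header arithmetic: offsets are measured from the end of the Ethernet header, and each VLAN tag pushes the network header out by four bytes. A sketch that recomputes them (header sizes assume no IPv4 options):

    #include <stdio.h>

    #define VLAN_HLEN 4
    #define IPV4_HLEN 20   /* sizeof(struct iphdr), no options */
    #define IPV6_HLEN 40   /* sizeof(struct ipv6hdr) */

    int main(void)
    {
        printf("single tag: nhoff=%d thoff=%d\n",
               VLAN_HLEN, VLAN_HLEN + IPV4_HLEN);           /* 4, 24 */
        printf("double tag: nhoff=%d thoff=%d\n",
               2 * VLAN_HLEN, 2 * VLAN_HLEN + IPV6_HLEN);   /* 8, 48 */
        return 0;
    }
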
@@ -68,5 +120,21 @@ void test_flow_dissector(void)
              err, errno, retval, duration, size, sizeof(flow_keys));
        CHECK_FLOW_KEYS("ipv6_flow_keys", flow_keys, pkt_v6_flow_keys);
 
+       err = bpf_prog_test_run(prog_fd, 10, &pkt_vlan_v4, sizeof(pkt_vlan_v4),
+                               &flow_keys, &size, &retval, &duration);
+       CHECK(size != sizeof(flow_keys) || err || retval != 1, "vlan_ipv4",
+             "err %d errno %d retval %d duration %d size %u/%lu\n",
+             err, errno, retval, duration, size, sizeof(flow_keys));
+       CHECK_FLOW_KEYS("vlan_ipv4_flow_keys", flow_keys,
+                       pkt_vlan_v4_flow_keys);
+
+       err = bpf_prog_test_run(prog_fd, 10, &pkt_vlan_v6, sizeof(pkt_vlan_v6),
+                               &flow_keys, &size, &retval, &duration);
+       CHECK(size != sizeof(flow_keys) || err || retval != 1, "vlan_ipv6",
+             "err %d errno %d retval %d duration %d size %u/%lu\n",
+             err, errno, retval, duration, size, sizeof(flow_keys));
+       CHECK_FLOW_KEYS("vlan_ipv6_flow_keys", flow_keys,
+                       pkt_vlan_v6_flow_keys);
+
        bpf_object__close(obj);
 }
index d7bb5beb1c57c112092b5e76e0755185f9303f8d..c2a0a9d5591b4b5ea3436bfe490689c7756e50db 100644 (file)
@@ -39,7 +39,7 @@ static int get_stack_print_output(void *data, int size)
                } else {
                        for (i = 0; i < num_stack; i++) {
                                ks = ksym_search(raw_data[i]);
-                               if (strcmp(ks->name, nonjit_func) == 0) {
+                               if (ks && (strcmp(ks->name, nonjit_func) == 0)) {
                                        found = true;
                                        break;
                                }
@@ -56,7 +56,7 @@ static int get_stack_print_output(void *data, int size)
                } else {
                        for (i = 0; i < num_stack; i++) {
                                ks = ksym_search(e->kern_stack[i]);
-                               if (strcmp(ks->name, nonjit_func) == 0) {
+                               if (ks && (strcmp(ks->name, nonjit_func) == 0)) {
                                        good_kern_stack = true;
                                        break;
                                }
diff --git a/tools/testing/selftests/bpf/prog_tests/global_data.c b/tools/testing/selftests/bpf/prog_tests/global_data.c
new file mode 100644 (file)
index 0000000..d011079
--- /dev/null
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+static void test_global_data_number(struct bpf_object *obj, __u32 duration)
+{
+       int i, err, map_fd;
+       uint64_t num;
+
+       map_fd = bpf_find_map(__func__, obj, "result_number");
+       if (map_fd < 0) {
+               error_cnt++;
+               return;
+       }
+
+       struct {
+               char *name;
+               uint32_t key;
+               uint64_t num;
+       } tests[] = {
+               { "relocate .bss reference",     0, 0 },
+               { "relocate .data reference",    1, 42 },
+               { "relocate .rodata reference",  2, 24 },
+               { "relocate .bss reference",     3, 0 },
+               { "relocate .data reference",    4, 0xffeeff },
+               { "relocate .rodata reference",  5, 0xabab },
+               { "relocate .bss reference",     6, 1234 },
+               { "relocate .bss reference",     7, 0 },
+               { "relocate .rodata reference",  8, 0xab },
+               { "relocate .rodata reference",  9, 0x1111111111111111 },
+               { "relocate .rodata reference", 10, ~0 },
+       };
+
+       for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
+               err = bpf_map_lookup_elem(map_fd, &tests[i].key, &num);
+               CHECK(err || num != tests[i].num, tests[i].name,
+                     "err %d result %lx expected %lx\n",
+                     err, num, tests[i].num);
+       }
+}
+
+static void test_global_data_string(struct bpf_object *obj, __u32 duration)
+{
+       int i, err, map_fd;
+       char str[32];
+
+       map_fd = bpf_find_map(__func__, obj, "result_string");
+       if (map_fd < 0) {
+               error_cnt++;
+               return;
+       }
+
+       struct {
+               char *name;
+               uint32_t key;
+               char str[32];
+       } tests[] = {
+               { "relocate .rodata reference", 0, "abcdefghijklmnopqrstuvwxyz" },
+               { "relocate .data reference",   1, "abcdefghijklmnopqrstuvwxyz" },
+               { "relocate .bss reference",    2, "" },
+               { "relocate .data reference",   3, "abcdexghijklmnopqrstuvwxyz" },
+               { "relocate .bss reference",    4, "\0\0hello" },
+       };
+
+       for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
+               err = bpf_map_lookup_elem(map_fd, &tests[i].key, str);
+               CHECK(err || memcmp(str, tests[i].str, sizeof(str)),
+                     tests[i].name, "err %d result \'%s\' expected \'%s\'\n",
+                     err, str, tests[i].str);
+       }
+}
+
+struct foo {
+       __u8  a;
+       __u32 b;
+       __u64 c;
+};
+
+static void test_global_data_struct(struct bpf_object *obj, __u32 duration)
+{
+       int i, err, map_fd;
+       struct foo val;
+
+       map_fd = bpf_find_map(__func__, obj, "result_struct");
+       if (map_fd < 0) {
+               error_cnt++;
+               return;
+       }
+
+       struct {
+               char *name;
+               uint32_t key;
+               struct foo val;
+       } tests[] = {
+               { "relocate .rodata reference", 0, { 42, 0xfefeefef, 0x1111111111111111ULL, } },
+               { "relocate .bss reference",    1, { } },
+               { "relocate .rodata reference", 2, { } },
+               { "relocate .data reference",   3, { 41, 0xeeeeefef, 0x2111111111111111ULL, } },
+       };
+
+       for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
+               err = bpf_map_lookup_elem(map_fd, &tests[i].key, &val);
+               CHECK(err || memcmp(&val, &tests[i].val, sizeof(val)),
+                     tests[i].name, "err %d result { %u, %u, %llu } expected { %u, %u, %llu }\n",
+                     err, val.a, val.b, val.c, tests[i].val.a, tests[i].val.b, tests[i].val.c);
+       }
+}
+
+static void test_global_data_rdonly(struct bpf_object *obj, __u32 duration)
+{
+       int err = -ENOMEM, map_fd, zero = 0;
+       struct bpf_map *map;
+       __u8 *buff;
+
+       map = bpf_object__find_map_by_name(obj, "test_glo.rodata");
+       if (!map || !bpf_map__is_internal(map)) {
+               error_cnt++;
+               return;
+       }
+
+       map_fd = bpf_map__fd(map);
+       if (map_fd < 0) {
+               error_cnt++;
+               return;
+       }
+
+       buff = malloc(bpf_map__def(map)->value_size);
+       if (buff)
+               err = bpf_map_update_elem(map_fd, &zero, buff, 0);
+       free(buff);
+       CHECK(!err || errno != EPERM, "test .rodata read-only map",
+             "err %d errno %d\n", err, errno);
+}
+
+void test_global_data(void)
+{
+       const char *file = "./test_global_data.o";
+       __u32 duration = 0, retval;
+       struct bpf_object *obj;
+       int err, prog_fd;
+
+       err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
+       if (CHECK(err, "load program", "error %d loading %s\n", err, file))
+               return;
+
+       err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
+                               NULL, NULL, &retval, &duration);
+       CHECK(err || retval, "pass global data run",
+             "err %d errno %d retval %d duration %d\n",
+             err, errno, retval, duration);
+
+       test_global_data_number(obj, duration);
+       test_global_data_string(obj, duration);
+       test_global_data_struct(obj, duration);
+       test_global_data_rdonly(obj, duration);
+
+       bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/skb_ctx.c b/tools/testing/selftests/bpf/prog_tests/skb_ctx.c
new file mode 100644 (file)
index 0000000..e95baa3
--- /dev/null
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_skb_ctx(void)
+{
+       struct __sk_buff skb = {
+               .cb[0] = 1,
+               .cb[1] = 2,
+               .cb[2] = 3,
+               .cb[3] = 4,
+               .cb[4] = 5,
+               .priority = 6,
+       };
+       struct bpf_prog_test_run_attr tattr = {
+               .data_in = &pkt_v4,
+               .data_size_in = sizeof(pkt_v4),
+               .ctx_in = &skb,
+               .ctx_size_in = sizeof(skb),
+               .ctx_out = &skb,
+               .ctx_size_out = sizeof(skb),
+       };
+       struct bpf_object *obj;
+       int err;
+       int i;
+
+       err = bpf_prog_load("./test_skb_ctx.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
+                           &tattr.prog_fd);
+       if (CHECK_ATTR(err, "load", "err %d errno %d\n", err, errno))
+               return;
+
+       /* ctx_in != NULL, ctx_size_in == 0 */
+
+       tattr.ctx_size_in = 0;
+       err = bpf_prog_test_run_xattr(&tattr);
+       CHECK_ATTR(err == 0, "ctx_size_in", "err %d errno %d\n", err, errno);
+       tattr.ctx_size_in = sizeof(skb);
+
+       /* ctx_out != NULL, ctx_size_out == 0 */
+
+       tattr.ctx_size_out = 0;
+       err = bpf_prog_test_run_xattr(&tattr);
+       CHECK_ATTR(err == 0, "ctx_size_out", "err %d errno %d\n", err, errno);
+       tattr.ctx_size_out = sizeof(skb);
+
+       /* non-zero [len, tc_index] fields should be rejected */
+
+       skb.len = 1;
+       err = bpf_prog_test_run_xattr(&tattr);
+       CHECK_ATTR(err == 0, "len", "err %d errno %d\n", err, errno);
+       skb.len = 0;
+
+       skb.tc_index = 1;
+       err = bpf_prog_test_run_xattr(&tattr);
+       CHECK_ATTR(err == 0, "tc_index", "err %d errno %d\n", err, errno);
+       skb.tc_index = 0;
+
+       /* non-zero [hash, sk] fields should be rejected */
+
+       skb.hash = 1;
+       err = bpf_prog_test_run_xattr(&tattr);
+       CHECK_ATTR(err == 0, "hash", "err %d errno %d\n", err, errno);
+       skb.hash = 0;
+
+       skb.sk = (struct bpf_sock *)1;
+       err = bpf_prog_test_run_xattr(&tattr);
+       CHECK_ATTR(err == 0, "sk", "err %d errno %d\n", err, errno);
+       skb.sk = 0;
+
+       err = bpf_prog_test_run_xattr(&tattr);
+       CHECK_ATTR(err != 0 || tattr.retval,
+                  "run",
+                  "err %d errno %d retval %d\n",
+                  err, errno, tattr.retval);
+
+       CHECK_ATTR(tattr.ctx_size_out != sizeof(skb),
+                  "ctx_size_out",
+                  "incorrect output size, want %lu have %u\n",
+                  sizeof(skb), tattr.ctx_size_out);
+
+       for (i = 0; i < 5; i++)
+               CHECK_ATTR(skb.cb[i] != i + 2,
+                          "ctx_out_cb",
+                          "skb->cb[i] == %d, expected %d\n",
+                          skb.cb[i], i + 2);
+       CHECK_ATTR(skb.priority != 7,
+                  "ctx_out_priority",
+                  "skb->priority == %d, expected %d\n",
+                  skb.priority, 7);
+}
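
The expected values fall out of test_skb_ctx.c, added later in this patch: the program increments each cb[i] and priority exactly once, so cb[] goes from {1..5} in ctx_in to {2..6} in ctx_out, and priority from 6 to 7. A plain-C sketch of that round trip:

    #include <stdio.h>

    int main(void)
    {
        int cb[5] = { 1, 2, 3, 4, 5 };   /* ctx_in */
        int priority = 6;

        for (int i = 0; i < 5; i++)
            cb[i]++;                     /* prog: skb->cb[i]++ */
        priority++;                      /* prog: skb->priority++ */

        for (int i = 0; i < 5; i++)
            printf("cb[%d]=%d (expect %d)\n", i, cb[i], i + 2);
        printf("priority=%d (expect 7)\n", priority);
        return 0;
    }
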
index 284660f5aa9533aaee034e13242f5ea098729676..75b17cada53937e5e237e00049a1bc22176cb83c 100644 (file)
@@ -92,7 +92,6 @@ static __always_inline int parse_eth_proto(struct __sk_buff *skb, __be16 proto)
 {
        struct bpf_flow_keys *keys = skb->flow_keys;
 
-       keys->n_proto = proto;
        switch (proto) {
        case bpf_htons(ETH_P_IP):
                bpf_tail_call(skb, &jmp_table, IP);
@@ -119,10 +118,9 @@ static __always_inline int parse_eth_proto(struct __sk_buff *skb, __be16 proto)
 SEC("flow_dissector")
 int _dissect(struct __sk_buff *skb)
 {
-       if (!skb->vlan_present)
-               return parse_eth_proto(skb, skb->protocol);
-       else
-               return parse_eth_proto(skb, skb->vlan_proto);
+       struct bpf_flow_keys *keys = skb->flow_keys;
+
+       return parse_eth_proto(skb, keys->n_proto);
 }
 
 /* Parses on IPPROTO_* */
@@ -336,15 +334,9 @@ PROG(VLAN)(struct __sk_buff *skb)
 {
        struct bpf_flow_keys *keys = skb->flow_keys;
        struct vlan_hdr *vlan, _vlan;
-       __be16 proto;
-
-       /* Peek back to see if single or double-tagging */
-       if (bpf_skb_load_bytes(skb, keys->thoff - sizeof(proto), &proto,
-                              sizeof(proto)))
-               return BPF_DROP;
 
        /* Account for double-tagging */
-       if (proto == bpf_htons(ETH_P_8021AD)) {
+       if (keys->n_proto == bpf_htons(ETH_P_8021AD)) {
                vlan = bpf_flow_dissect_get_header(skb, sizeof(*vlan), &_vlan);
                if (!vlan)
                        return BPF_DROP;
@@ -352,6 +344,7 @@ PROG(VLAN)(struct __sk_buff *skb)
                if (vlan->h_vlan_encapsulated_proto != bpf_htons(ETH_P_8021Q))
                        return BPF_DROP;
 
+               keys->nhoff += sizeof(*vlan);
                keys->thoff += sizeof(*vlan);
        }
 
@@ -359,12 +352,14 @@ PROG(VLAN)(struct __sk_buff *skb)
        if (!vlan)
                return BPF_DROP;
 
+       keys->nhoff += sizeof(*vlan);
        keys->thoff += sizeof(*vlan);
        /* Only allow 8021AD + 8021Q double tagging and no triple tagging. */
        if (vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021AD) ||
            vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021Q))
                return BPF_DROP;
 
+       keys->n_proto = vlan->h_vlan_encapsulated_proto;
        return parse_eth_proto(skb, vlan->h_vlan_encapsulated_proto);
 }
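
The net effect of these bpf_flow.c changes is an invariant: keys->n_proto always names the header at keys->nhoff. The kernel seeds both before the program runs, and the VLAN parser now advances nhoff and rewrites n_proto as it consumes each tag, instead of peeking backwards into the packet. A simplified trace of the double-tagged case (plain C, constants inlined):

    #include <stdio.h>

    #define ETH_P_8021AD 0x88A8
    #define ETH_P_8021Q  0x8100
    #define ETH_P_IPV6   0x86DD
    #define VLAN_HLEN    4

    int main(void)
    {
        /* state as seeded by the kernel for an 802.1ad+802.1q packet */
        unsigned short n_proto = ETH_P_8021AD;
        int nhoff = 0, thoff = 0;

        /* outer tag: must be followed by an 802.1q tag */
        nhoff += VLAN_HLEN; thoff += VLAN_HLEN;
        n_proto = ETH_P_8021Q;

        /* inner tag: the encapsulated protocol comes from the header */
        nhoff += VLAN_HLEN; thoff += VLAN_HLEN;
        n_proto = ETH_P_IPV6;

        printf("nhoff=%d thoff=%d n_proto=0x%04x\n", nhoff, thoff, n_proto);
        return 0;
    }
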
 
diff --git a/tools/testing/selftests/bpf/progs/test_global_data.c b/tools/testing/selftests/bpf/progs/test_global_data.c
new file mode 100644 (file)
index 0000000..5ab14e9
--- /dev/null
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Isovalent, Inc.
+
+#include <linux/bpf.h>
+#include <linux/pkt_cls.h>
+#include <string.h>
+
+#include "bpf_helpers.h"
+
+struct bpf_map_def SEC("maps") result_number = {
+       .type           = BPF_MAP_TYPE_ARRAY,
+       .key_size       = sizeof(__u32),
+       .value_size     = sizeof(__u64),
+       .max_entries    = 11,
+};
+
+struct bpf_map_def SEC("maps") result_string = {
+       .type           = BPF_MAP_TYPE_ARRAY,
+       .key_size       = sizeof(__u32),
+       .value_size     = 32,
+       .max_entries    = 5,
+};
+
+struct foo {
+       __u8  a;
+       __u32 b;
+       __u64 c;
+};
+
+struct bpf_map_def SEC("maps") result_struct = {
+       .type           = BPF_MAP_TYPE_ARRAY,
+       .key_size       = sizeof(__u32),
+       .value_size     = sizeof(struct foo),
+       .max_entries    = 5,
+};
+
+/* Relocation tests for __u64s. */
+static       __u64 num0;
+static       __u64 num1 = 42;
+static const __u64 num2 = 24;
+static       __u64 num3 = 0;
+static       __u64 num4 = 0xffeeff;
+static const __u64 num5 = 0xabab;
+static const __u64 num6 = 0xab;
+
+/* Relocation tests for strings. */
+static const char str0[32] = "abcdefghijklmnopqrstuvwxyz";
+static       char str1[32] = "abcdefghijklmnopqrstuvwxyz";
+static       char str2[32];
+
+/* Relocation tests for structs. */
+static const struct foo struct0 = {
+       .a = 42,
+       .b = 0xfefeefef,
+       .c = 0x1111111111111111ULL,
+};
+static struct foo struct1;
+static const struct foo struct2;
+static struct foo struct3 = {
+       .a = 41,
+       .b = 0xeeeeefef,
+       .c = 0x2111111111111111ULL,
+};
+
+#define test_reloc(map, num, var)                                      \
+       do {                                                            \
+               __u32 key = num;                                        \
+               bpf_map_update_elem(&result_##map, &key, var, 0);       \
+       } while (0)
+
+SEC("static_data_load")
+int load_static_data(struct __sk_buff *skb)
+{
+       static const __u64 bar = ~0;
+
+       test_reloc(number, 0, &num0);
+       test_reloc(number, 1, &num1);
+       test_reloc(number, 2, &num2);
+       test_reloc(number, 3, &num3);
+       test_reloc(number, 4, &num4);
+       test_reloc(number, 5, &num5);
+       num4 = 1234;
+       test_reloc(number, 6, &num4);
+       test_reloc(number, 7, &num0);
+       test_reloc(number, 8, &num6);
+
+       test_reloc(string, 0, str0);
+       test_reloc(string, 1, str1);
+       test_reloc(string, 2, str2);
+       str1[5] = 'x';
+       test_reloc(string, 3, str1);
+       __builtin_memcpy(&str2[2], "hello", sizeof("hello"));
+       test_reloc(string, 4, str2);
+
+       test_reloc(struct, 0, &struct0);
+       test_reloc(struct, 1, &struct1);
+       test_reloc(struct, 2, &struct2);
+       test_reloc(struct, 3, &struct3);
+
+       test_reloc(number,  9, &struct0.c);
+       test_reloc(number, 10, &bar);
+
+       return TC_ACT_OK;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_jhash.h b/tools/testing/selftests/bpf/progs/test_jhash.h
new file mode 100644 (file)
index 0000000..3d12c11
--- /dev/null
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Facebook
+
+typedef unsigned int u32;
+
+static __attribute__((always_inline)) u32 rol32(u32 word, unsigned int shift)
+{
+       return (word << shift) | (word >> ((-shift) & 31));
+}
+
+#define __jhash_mix(a, b, c)                   \
+{                                              \
+       a -= c;  a ^= rol32(c, 4);  c += b;     \
+       b -= a;  b ^= rol32(a, 6);  a += c;     \
+       c -= b;  c ^= rol32(b, 8);  b += a;     \
+       a -= c;  a ^= rol32(c, 16); c += b;     \
+       b -= a;  b ^= rol32(a, 19); a += c;     \
+       c -= b;  c ^= rol32(b, 4);  b += a;     \
+}
+
+#define __jhash_final(a, b, c)                 \
+{                                              \
+       c ^= b; c -= rol32(b, 14);              \
+       a ^= c; a -= rol32(c, 11);              \
+       b ^= a; b -= rol32(a, 25);              \
+       c ^= b; c -= rol32(b, 16);              \
+       a ^= c; a -= rol32(c, 4);               \
+       b ^= a; b -= rol32(a, 14);              \
+       c ^= b; c -= rol32(b, 24);              \
+}
+
+#define JHASH_INITVAL          0xdeadbeef
+
+static ATTR
+u32 jhash(const void *key, u32 length, u32 initval)
+{
+       u32 a, b, c;
+       const unsigned char *k = key;
+
+       a = b = c = JHASH_INITVAL + length + initval;
+
+       while (length > 12) {
+               a += *(volatile u32 *)(k);
+               b += *(volatile u32 *)(k + 4);
+               c += *(volatile u32 *)(k + 8);
+               __jhash_mix(a, b, c);
+               length -= 12;
+               k += 12;
+       }
+       switch (length) {
+       case 12: c += (u32)k[11]<<24;
+       case 11: c += (u32)k[10]<<16;
+       case 10: c += (u32)k[9]<<8;
+       case 9:  c += k[8];
+       case 8:  b += (u32)k[7]<<24;
+       case 7:  b += (u32)k[6]<<16;
+       case 6:  b += (u32)k[5]<<8;
+       case 5:  b += k[4];
+       case 4:  a += (u32)k[3]<<24;
+       case 3:  a += (u32)k[2]<<16;
+       case 2:  a += (u32)k[1]<<8;
+       case 1:  a += k[0];
+                c ^= a;
+                __jhash_final(a, b, c);
+       case 0: /* Nothing left to add */
+               break;
+       }
+
+       return c;
+}
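+
+/* Note: the switch above relies on deliberate case fall-through, the
+ * classic Jenkins tail that folds the trailing 1..12 bytes into a, b
+ * and c before the final mix. */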
diff --git a/tools/testing/selftests/bpf/progs/test_skb_ctx.c b/tools/testing/selftests/bpf/progs/test_skb_ctx.c
new file mode 100644 (file)
index 0000000..7a80960
--- /dev/null
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include "bpf_helpers.h"
+
+int _version SEC("version") = 1;
+char _license[] SEC("license") = "GPL";
+
+SEC("skb_ctx")
+int process(struct __sk_buff *skb)
+{
+       #pragma clang loop unroll(full)
+       for (int i = 0; i < 5; i++) {
+               if (skb->cb[i] != i + 1)
+                       return 1;
+               skb->cb[i]++;
+       }
+       skb->priority++;
+
+       return 0;
+}
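+
+/* The checks above only pass when cb[] arrives as {1, 2, 3, 4, 5}; a
+ * BPF_PROG_TEST_RUN-based harness (not part of this hunk) is presumably
+ * what seeds the context and verifies the incremented values. */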
diff --git a/tools/testing/selftests/bpf/progs/test_tc_tunnel.c b/tools/testing/selftests/bpf/progs/test_tc_tunnel.c
index f541c2de947d2bbacadf9a3a626ce3c097e8ec50..bcb00d737e953b47e062cfc727d0c9437ef631a8 100644 (file)
@@ -11,7 +11,9 @@
 #include <linux/in.h>
 #include <linux/ip.h>
 #include <linux/ipv6.h>
+#include <linux/mpls.h>
 #include <linux/tcp.h>
+#include <linux/udp.h>
 #include <linux/pkt_cls.h>
 #include <linux/types.h>
 
 
 static const int cfg_port = 8000;
 
-struct grev4hdr {
-       struct iphdr ip;
+static const int cfg_udp_src = 20000;
+
+#define        UDP_PORT                5555
+#define        MPLS_OVER_UDP_PORT      6635
+#define        ETH_OVER_UDP_PORT       7777
+
+/* MPLS label 1000 with S bit (last label) set and ttl of 255. */
+static const __u32 mpls_label = __bpf_constant_htonl(1000 << 12 |
+                                                    MPLS_LS_S_MASK | 0xff);
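+/* Layout of the label word above: label(20 bits) << 12 | TC(3) << 9 |
+ * S(1) << 8 | TTL(8); 1000 << 12 = 0x3e8000, OR'd with MPLS_LS_S_MASK
+ * (0x100) and 0xff gives host value 0x003e81ff before the htonl. */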
+
+struct gre_hdr {
        __be16 flags;
        __be16 protocol;
 } __attribute__((packed));
 
-struct grev6hdr {
+union l4hdr {
+       struct udphdr udp;
+       struct gre_hdr gre;
+};
+
+struct v4hdr {
+       struct iphdr ip;
+       union l4hdr l4hdr;
+       __u8 pad[16];                   /* enough space for L2 header */
+} __attribute__((packed));
+
+struct v6hdr {
        struct ipv6hdr ip;
-       __be16 flags;
-       __be16 protocol;
+       union l4hdr l4hdr;
+       __u8 pad[16];                   /* enough space for L2 header */
 } __attribute__((packed));
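+
+/* Sizing note: the 16-byte pad covers either L2 header written below,
+ * an Ethernet header (ETH_HLEN == 14) or a single 4-byte MPLS label. */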
 
 static __always_inline void set_ipv4_csum(struct iphdr *iph)
@@ -47,13 +69,15 @@ static __always_inline void set_ipv4_csum(struct iphdr *iph)
        iph->check = ~((csum & 0xffff) + (csum >> 16));
 }
 
-static __always_inline int encap_ipv4(struct __sk_buff *skb, bool with_gre)
+static __always_inline int encap_ipv4(struct __sk_buff *skb, __u8 encap_proto,
+                                     __u16 l2_proto)
 {
-       struct grev4hdr h_outer;
+       __u16 udp_dst = UDP_PORT;
        struct iphdr iph_inner;
+       struct v4hdr h_outer;
        struct tcphdr tcph;
+       int olen, l2_len;
        __u64 flags;
-       int olen;
 
        if (bpf_skb_load_bytes(skb, ETH_HLEN, &iph_inner,
                               sizeof(iph_inner)) < 0)
@@ -70,13 +94,58 @@ static __always_inline int encap_ipv4(struct __sk_buff *skb, bool with_gre)
        if (tcph.dest != __bpf_constant_htons(cfg_port))
                return TC_ACT_OK;
 
+       olen = sizeof(h_outer.ip);
+       l2_len = 0;
+
        flags = BPF_F_ADJ_ROOM_FIXED_GSO | BPF_F_ADJ_ROOM_ENCAP_L3_IPV4;
-       if (with_gre) {
+
+       switch (l2_proto) {
+       case ETH_P_MPLS_UC:
+               l2_len = sizeof(mpls_label);
+               udp_dst = MPLS_OVER_UDP_PORT;
+               break;
+       case ETH_P_TEB:
+               l2_len = ETH_HLEN;
+               udp_dst = ETH_OVER_UDP_PORT;
+               break;
+       }
+       flags |= BPF_F_ADJ_ROOM_ENCAP_L2(l2_len);
+
+       switch (encap_proto) {
+       case IPPROTO_GRE:
                flags |= BPF_F_ADJ_ROOM_ENCAP_L4_GRE;
-               olen = sizeof(h_outer);
-       } else {
-               olen = sizeof(h_outer.ip);
+               olen += sizeof(h_outer.l4hdr.gre);
+               h_outer.l4hdr.gre.protocol = bpf_htons(l2_proto);
+               h_outer.l4hdr.gre.flags = 0;
+               break;
+       case IPPROTO_UDP:
+               flags |= BPF_F_ADJ_ROOM_ENCAP_L4_UDP;
+               olen += sizeof(h_outer.l4hdr.udp);
+               h_outer.l4hdr.udp.source = __bpf_constant_htons(cfg_udp_src);
+               h_outer.l4hdr.udp.dest = bpf_htons(udp_dst);
+               h_outer.l4hdr.udp.check = 0;
+               h_outer.l4hdr.udp.len = bpf_htons(bpf_ntohs(iph_inner.tot_len) +
+                                                 sizeof(h_outer.l4hdr.udp) +
+                                                 l2_len);
+               break;
+       case IPPROTO_IPIP:
+               break;
+       default:
+               return TC_ACT_OK;
+       }
+
+       /* add L2 encap (if specified) */
+       switch (l2_proto) {
+       case ETH_P_MPLS_UC:
+               *((__u32 *)((__u8 *)&h_outer + olen)) = mpls_label;
+               break;
+       case ETH_P_TEB:
+               if (bpf_skb_load_bytes(skb, 0, (__u8 *)&h_outer + olen,
+                                      ETH_HLEN))
+                       return TC_ACT_SHOT;
+               break;
        }
+       olen += l2_len;
 
        /* add room between mac and network header */
        if (bpf_skb_adjust_room(skb, olen, BPF_ADJ_ROOM_MAC, flags))
@@ -85,16 +154,10 @@ static __always_inline int encap_ipv4(struct __sk_buff *skb, bool with_gre)
        /* prepare new outer network header */
        h_outer.ip = iph_inner;
        h_outer.ip.tot_len = bpf_htons(olen +
-                                     bpf_htons(h_outer.ip.tot_len));
-       if (with_gre) {
-               h_outer.ip.protocol = IPPROTO_GRE;
-               h_outer.protocol = bpf_htons(ETH_P_IP);
-               h_outer.flags = 0;
-       } else {
-               h_outer.ip.protocol = IPPROTO_IPIP;
-       }
+                                      bpf_ntohs(h_outer.ip.tot_len));
+       h_outer.ip.protocol = encap_proto;
 
-       set_ipv4_csum((void *)&h_outer.ip);
+       set_ipv4_csum(&h_outer.ip);
 
        /* store new outer network header */
        if (bpf_skb_store_bytes(skb, ETH_HLEN, &h_outer, olen,
@@ -104,13 +167,16 @@ static __always_inline int encap_ipv4(struct __sk_buff *skb, bool with_gre)
        return TC_ACT_OK;
 }
 
-static __always_inline int encap_ipv6(struct __sk_buff *skb, bool with_gre)
+static __always_inline int encap_ipv6(struct __sk_buff *skb, __u8 encap_proto,
+                                     __u16 l2_proto)
 {
+       __u16 udp_dst = UDP_PORT;
        struct ipv6hdr iph_inner;
-       struct grev6hdr h_outer;
+       struct v6hdr h_outer;
        struct tcphdr tcph;
+       int olen, l2_len;
+       __u16 tot_len;
        __u64 flags;
-       int olen;
 
        if (bpf_skb_load_bytes(skb, ETH_HLEN, &iph_inner,
                               sizeof(iph_inner)) < 0)
@@ -124,14 +190,58 @@ static __always_inline int encap_ipv6(struct __sk_buff *skb, bool with_gre)
        if (tcph.dest != __bpf_constant_htons(cfg_port))
                return TC_ACT_OK;
 
+       olen = sizeof(h_outer.ip);
+       l2_len = 0;
+
        flags = BPF_F_ADJ_ROOM_FIXED_GSO | BPF_F_ADJ_ROOM_ENCAP_L3_IPV6;
-       if (with_gre) {
+
+       switch (l2_proto) {
+       case ETH_P_MPLS_UC:
+               l2_len = sizeof(mpls_label);
+               udp_dst = MPLS_OVER_UDP_PORT;
+               break;
+       case ETH_P_TEB:
+               l2_len = ETH_HLEN;
+               udp_dst = ETH_OVER_UDP_PORT;
+               break;
+       }
+       flags |= BPF_F_ADJ_ROOM_ENCAP_L2(l2_len);
+
+       switch (encap_proto) {
+       case IPPROTO_GRE:
                flags |= BPF_F_ADJ_ROOM_ENCAP_L4_GRE;
-               olen = sizeof(h_outer);
-       } else {
-               olen = sizeof(h_outer.ip);
+               olen += sizeof(h_outer.l4hdr.gre);
+               h_outer.l4hdr.gre.protocol = bpf_htons(l2_proto);
+               h_outer.l4hdr.gre.flags = 0;
+               break;
+       case IPPROTO_UDP:
+               flags |= BPF_F_ADJ_ROOM_ENCAP_L4_UDP;
+               olen += sizeof(h_outer.l4hdr.udp);
+               h_outer.l4hdr.udp.source = __bpf_constant_htons(cfg_udp_src);
+               h_outer.l4hdr.udp.dest = bpf_htons(udp_dst);
+               tot_len = bpf_ntohs(iph_inner.payload_len) + sizeof(iph_inner) +
+                         sizeof(h_outer.l4hdr.udp);
+               h_outer.l4hdr.udp.check = 0;
+               h_outer.l4hdr.udp.len = bpf_htons(tot_len);
+               break;
+       case IPPROTO_IPV6:
+               break;
+       default:
+               return TC_ACT_OK;
        }
 
+       /* add L2 encap (if specified) */
+       switch (l2_proto) {
+       case ETH_P_MPLS_UC:
+               *((__u32 *)((__u8 *)&h_outer + olen)) = mpls_label;
+               break;
+       case ETH_P_TEB:
+               if (bpf_skb_load_bytes(skb, 0, (__u8 *)&h_outer + olen,
+                                      ETH_HLEN))
+                       return TC_ACT_SHOT;
+               break;
+       }
+       olen += l2_len;
 
        /* add room between mac and network header */
        if (bpf_skb_adjust_room(skb, olen, BPF_ADJ_ROOM_MAC, flags))
@@ -141,13 +251,8 @@ static __always_inline int encap_ipv6(struct __sk_buff *skb, bool with_gre)
        h_outer.ip = iph_inner;
        h_outer.ip.payload_len = bpf_htons(olen +
                                           bpf_ntohs(h_outer.ip.payload_len));
-       if (with_gre) {
-               h_outer.ip.nexthdr = IPPROTO_GRE;
-               h_outer.protocol = bpf_htons(ETH_P_IPV6);
-               h_outer.flags = 0;
-       } else {
-               h_outer.ip.nexthdr = IPPROTO_IPV6;
-       }
+
+       h_outer.ip.nexthdr = encap_proto;
 
        /* store new outer network header */
        if (bpf_skb_store_bytes(skb, ETH_HLEN, &h_outer, olen,
@@ -157,54 +262,168 @@ static __always_inline int encap_ipv6(struct __sk_buff *skb, bool with_gre)
        return TC_ACT_OK;
 }
 
-SEC("encap_ipip")
-int __encap_ipip(struct __sk_buff *skb)
+SEC("encap_ipip_none")
+int __encap_ipip_none(struct __sk_buff *skb)
 {
        if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
-               return encap_ipv4(skb, false);
+               return encap_ipv4(skb, IPPROTO_IPIP, ETH_P_IP);
        else
                return TC_ACT_OK;
 }
 
-SEC("encap_gre")
-int __encap_gre(struct __sk_buff *skb)
+SEC("encap_gre_none")
+int __encap_gre_none(struct __sk_buff *skb)
 {
        if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
-               return encap_ipv4(skb, true);
+               return encap_ipv4(skb, IPPROTO_GRE, ETH_P_IP);
        else
                return TC_ACT_OK;
 }
 
-SEC("encap_ip6tnl")
-int __encap_ip6tnl(struct __sk_buff *skb)
+SEC("encap_gre_mpls")
+int __encap_gre_mpls(struct __sk_buff *skb)
+{
+       if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
+               return encap_ipv4(skb, IPPROTO_GRE, ETH_P_MPLS_UC);
+       else
+               return TC_ACT_OK;
+}
+
+SEC("encap_gre_eth")
+int __encap_gre_eth(struct __sk_buff *skb)
+{
+       if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
+               return encap_ipv4(skb, IPPROTO_GRE, ETH_P_TEB);
+       else
+               return TC_ACT_OK;
+}
+
+SEC("encap_udp_none")
+int __encap_udp_none(struct __sk_buff *skb)
+{
+       if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
+               return encap_ipv4(skb, IPPROTO_UDP, ETH_P_IP);
+       else
+               return TC_ACT_OK;
+}
+
+SEC("encap_udp_mpls")
+int __encap_udp_mpls(struct __sk_buff *skb)
+{
+       if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
+               return encap_ipv4(skb, IPPROTO_UDP, ETH_P_MPLS_UC);
+       else
+               return TC_ACT_OK;
+}
+
+SEC("encap_udp_eth")
+int __encap_udp_eth(struct __sk_buff *skb)
+{
+       if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
+               return encap_ipv4(skb, IPPROTO_UDP, ETH_P_TEB);
+       else
+               return TC_ACT_OK;
+}
+
+SEC("encap_ip6tnl_none")
+int __encap_ip6tnl_none(struct __sk_buff *skb)
 {
        if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
-               return encap_ipv6(skb, false);
+               return encap_ipv6(skb, IPPROTO_IPV6, ETH_P_IPV6);
        else
                return TC_ACT_OK;
 }
 
-SEC("encap_ip6gre")
-int __encap_ip6gre(struct __sk_buff *skb)
+SEC("encap_ip6gre_none")
+int __encap_ip6gre_none(struct __sk_buff *skb)
 {
        if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
-               return encap_ipv6(skb, true);
+               return encap_ipv6(skb, IPPROTO_GRE, ETH_P_IPV6);
+       else
+               return TC_ACT_OK;
+}
+
+SEC("encap_ip6gre_mpls")
+int __encap_ip6gre_mpls(struct __sk_buff *skb)
+{
+       if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
+               return encap_ipv6(skb, IPPROTO_GRE, ETH_P_MPLS_UC);
+       else
+               return TC_ACT_OK;
+}
+
+SEC("encap_ip6gre_eth")
+int __encap_ip6gre_eth(struct __sk_buff *skb)
+{
+       if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
+               return encap_ipv6(skb, IPPROTO_GRE, ETH_P_TEB);
+       else
+               return TC_ACT_OK;
+}
+
+SEC("encap_ip6udp_none")
+int __encap_ip6udp_none(struct __sk_buff *skb)
+{
+       if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
+               return encap_ipv6(skb, IPPROTO_UDP, ETH_P_IPV6);
+       else
+               return TC_ACT_OK;
+}
+
+SEC("encap_ip6udp_mpls")
+int __encap_ip6udp_mpls(struct __sk_buff *skb)
+{
+       if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
+               return encap_ipv6(skb, IPPROTO_UDP, ETH_P_MPLS_UC);
+       else
+               return TC_ACT_OK;
+}
+
+SEC("encap_ip6udp_eth")
+int __encap_ip6udp_eth(struct __sk_buff *skb)
+{
+       if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
+               return encap_ipv6(skb, IPPROTO_UDP, ETH_P_TEB);
        else
                return TC_ACT_OK;
 }
 
 static int decap_internal(struct __sk_buff *skb, int off, int len, char proto)
 {
-       char buf[sizeof(struct grev6hdr)];
-       int olen;
+       char buf[sizeof(struct v6hdr)];
+       struct gre_hdr greh;
+       struct udphdr udph;
+       int olen = len;
 
        switch (proto) {
        case IPPROTO_IPIP:
        case IPPROTO_IPV6:
-               olen = len;
                break;
        case IPPROTO_GRE:
-               olen = len + 4 /* gre hdr */;
+               olen += sizeof(struct gre_hdr);
+               if (bpf_skb_load_bytes(skb, off + len, &greh, sizeof(greh)) < 0)
+                       return TC_ACT_OK;
+               switch (bpf_ntohs(greh.protocol)) {
+               case ETH_P_MPLS_UC:
+                       olen += sizeof(mpls_label);
+                       break;
+               case ETH_P_TEB:
+                       olen += ETH_HLEN;
+                       break;
+               }
+               break;
+       case IPPROTO_UDP:
+               olen += sizeof(struct udphdr);
+               if (bpf_skb_load_bytes(skb, off + len, &udph, sizeof(udph)) < 0)
+                       return TC_ACT_OK;
+               switch (bpf_ntohs(udph.dest)) {
+               case MPLS_OVER_UDP_PORT:
+                       olen += sizeof(mpls_label);
+                       break;
+               case ETH_OVER_UDP_PORT:
+                       olen += ETH_HLEN;
+                       break;
+               }
                break;
        default:
                return TC_ACT_OK;
diff --git a/tools/testing/selftests/bpf/progs/test_verif_scale1.c b/tools/testing/selftests/bpf/progs/test_verif_scale1.c
new file mode 100644 (file)
index 0000000..f3236ce
--- /dev/null
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Facebook
+#include <linux/bpf.h>
+#include "bpf_helpers.h"
+#define ATTR __attribute__((noinline))
+#include "test_jhash.h"
+
+SEC("scale90_noinline")
+int balancer_ingress(struct __sk_buff *ctx)
+{
+       void *data_end = (void *)(long)ctx->data_end;
+       void *data = (void *)(long)ctx->data;
+       void *ptr;
+       int ret = 0, nh_off, i = 0;
+
+       nh_off = 14;
+
+       /* pragma unroll doesn't work on large loops */
+
+#define C do { \
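+       # 1458 = 1500 - 20 (outer IPv4) - 8 (UDP) - 14 (inner Ethernet),
+       # 1438 = 1500 - 40 (outer IPv6) - 8 - 14: room for the largest
+       # encapsulation exercised here (UDP carrying an inner Ethernet header)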
+       ptr = data + i; \
+       if (ptr + nh_off > data_end) \
+               break; \
+       ctx->tc_index = jhash(ptr, nh_off, ctx->cb[0] + i++); \
+       } while (0);
+#define C30 C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;
+       C30;C30;C30; /* 90 calls */
+       return 0;
+}
+char _license[] SEC("license") = "GPL";
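+
+/* Expansion sketch: each C30 emits thirty copies of the C block, so the
+ * three C30 statements above unroll into 90 sequential jhash() calls,
+ * enough to exercise verifier scaling without a loop construct. */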
diff --git a/tools/testing/selftests/bpf/progs/test_verif_scale2.c b/tools/testing/selftests/bpf/progs/test_verif_scale2.c
new file mode 100644 (file)
index 0000000..7783069
--- /dev/null
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Facebook
+#include <linux/bpf.h>
+#include "bpf_helpers.h"
+#define ATTR __attribute__((always_inline))
+#include "test_jhash.h"
+
+SEC("scale90_inline")
+int balancer_ingress(struct __sk_buff *ctx)
+{
+       void *data_end = (void *)(long)ctx->data_end;
+       void *data = (void *)(long)ctx->data;
+       void *ptr;
+       int ret = 0, nh_off, i = 0;
+
+       nh_off = 14;
+
+       /* pragma unroll doesn't work on large loops */
+
+#define C do { \
+       ptr = data + i; \
+       if (ptr + nh_off > data_end) \
+               break; \
+       ctx->tc_index = jhash(ptr, nh_off, ctx->cb[0] + i++); \
+       } while (0);
+#define C30 C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;
+       C30;C30;C30; /* 90 calls */
+       return 0;
+}
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_verif_scale3.c b/tools/testing/selftests/bpf/progs/test_verif_scale3.c
new file mode 100644 (file)
index 0000000..1848da0
--- /dev/null
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Facebook
+#include <linux/bpf.h>
+#include "bpf_helpers.h"
+#define ATTR __attribute__((noinline))
+#include "test_jhash.h"
+
+SEC("scale90_noinline32")
+int balancer_ingress(struct __sk_buff *ctx)
+{
+       void *data_end = (void *)(long)ctx->data_end;
+       void *data = (void *)(long)ctx->data;
+       void *ptr;
+       int ret = 0, nh_off, i = 0;
+
+       nh_off = 32;
+
+       /* pragma unroll doesn't work on large loops */
+
+#define C do { \
+       ptr = data + i; \
+       if (ptr + nh_off > data_end) \
+               break; \
+       ctx->tc_index = jhash(ptr, nh_off, ctx->cb[0] + i++); \
+       } while (0);
+#define C30 C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;
+       C30;C30;C30; /* 90 calls */
+       return 0;
+}
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c
index 23e3b314ca603956ce88ed4ac8f1512eeddfa34f..44cd3378d216605c82de8f4207efc5093a46917d 100644 (file)
@@ -85,6 +85,11 @@ static int __base_pr(enum libbpf_print_level level __attribute__((unused)),
 #define BTF_UNION_ENC(name, nr_elems, sz)      \
        BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_UNION, 0, nr_elems), sz)
 
+#define BTF_VAR_ENC(name, type, linkage)       \
+       BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), type), (linkage)
+#define BTF_VAR_SECINFO_ENC(type, offset, size)        \
+       (type), (offset), (size)
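+/* Encoding sketch: BTF_VAR_ENC emits a BTF_KIND_VAR type header followed
+ * by its single linkage word (struct btf_var); BTF_VAR_SECINFO_ENC emits
+ * the (type, offset, size) triple of one struct btf_var_secinfo entry. */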
+
 #define BTF_MEMBER_ENC(name, type, bits_offset)        \
        (name), (type), (bits_offset)
 #define BTF_ENUM_ENC(name, val) (name), (val)
@@ -291,7 +296,6 @@ static struct btf_raw_test raw_tests[] = {
        .value_type_id = 3,
        .max_entries = 4,
 },
-
 {
        .descr = "struct test #3 Invalid member offset",
        .raw_types = {
@@ -319,7 +323,664 @@ static struct btf_raw_test raw_tests[] = {
        .btf_load_err = true,
        .err_str = "Invalid member bits_offset",
 },
-
+/*
+ * struct A {
+ *     unsigned long long m;
+ *     int n;
+ *     char o;
+ *     [3 bytes hole]
+ *     int p[8];
+ * };
+ */
+{
+       .descr = "global data test #1",
+       .raw_types = {
+               /* int */
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
+               /* unsigned long long */
+               BTF_TYPE_INT_ENC(0, 0, 0, 64, 8),               /* [2] */
+               /* char */
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 8, 1),   /* [3] */
+               /* int[8] */
+               BTF_TYPE_ARRAY_ENC(1, 1, 8),                    /* [4] */
+               /* struct A { */                                /* [5] */
+               BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 4), 48),
+               BTF_MEMBER_ENC(NAME_TBD, 2, 0), /* unsigned long long m;*/
+               BTF_MEMBER_ENC(NAME_TBD, 1, 64),/* int n;               */
+               BTF_MEMBER_ENC(NAME_TBD, 3, 96),/* char o;              */
+               BTF_MEMBER_ENC(NAME_TBD, 4, 128),/* int p[8]            */
+               /* } */
+               BTF_END_RAW,
+       },
+       .str_sec = "\0A\0m\0n\0o\0p",
+       .str_sec_size = sizeof("\0A\0m\0n\0o\0p"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = "struct_test1_map",
+       .key_size = sizeof(int),
+       .value_size = 48,
+       .key_type_id = 1,
+       .value_type_id = 5,
+       .max_entries = 4,
+},
+/*
+ * struct A {
+ *     unsigned long long m;
+ *     int n;
+ *     char o;
+ *     [3 bytes hole]
+ *     int p[8];
+ * };
+ * static struct A t; <- in .bss
+ */
+{
+       .descr = "global data test #2",
+       .raw_types = {
+               /* int */
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
+               /* unsigned long long */
+               BTF_TYPE_INT_ENC(0, 0, 0, 64, 8),               /* [2] */
+               /* char */
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 8, 1),   /* [3] */
+               /* int[8] */
+               BTF_TYPE_ARRAY_ENC(1, 1, 8),                    /* [4] */
+               /* struct A { */                                /* [5] */
+               BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 4), 48),
+               BTF_MEMBER_ENC(NAME_TBD, 2, 0), /* unsigned long long m;*/
+               BTF_MEMBER_ENC(NAME_TBD, 1, 64),/* int n;               */
+               BTF_MEMBER_ENC(NAME_TBD, 3, 96),/* char o;              */
+               BTF_MEMBER_ENC(NAME_TBD, 4, 128),/* int p[8]            */
+               /* } */
+               /* static struct A t */
+               BTF_VAR_ENC(NAME_TBD, 5, 0),                    /* [6] */
+               /* .bss section */                              /* [7] */
+               BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 48),
+               BTF_VAR_SECINFO_ENC(6, 0, 48),
+               BTF_END_RAW,
+       },
+       .str_sec = "\0A\0m\0n\0o\0p\0t\0.bss",
+       .str_sec_size = sizeof("\0A\0m\0n\0o\0p\0t\0.bss"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = ".bss",
+       .key_size = sizeof(int),
+       .value_size = 48,
+       .key_type_id = 0,
+       .value_type_id = 7,
+       .max_entries = 1,
+},
+{
+       .descr = "global data test #3",
+       .raw_types = {
+               /* int */
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
+               /* static int t */
+               BTF_VAR_ENC(NAME_TBD, 1, 0),                    /* [2] */
+               /* .bss section */                              /* [3] */
+               BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
+               BTF_VAR_SECINFO_ENC(2, 0, 4),
+               BTF_END_RAW,
+       },
+       .str_sec = "\0t\0.bss",
+       .str_sec_size = sizeof("\0t\0.bss"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = ".bss",
+       .key_size = sizeof(int),
+       .value_size = 4,
+       .key_type_id = 0,
+       .value_type_id = 3,
+       .max_entries = 1,
+},
+{
+       .descr = "global data test #4, unsupported linkage",
+       .raw_types = {
+               /* int */
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
+               /* static int t */
+               BTF_VAR_ENC(NAME_TBD, 1, 2),                    /* [2] */
+               /* .bss section */                              /* [3] */
+               BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
+               BTF_VAR_SECINFO_ENC(2, 0, 4),
+               BTF_END_RAW,
+       },
+       .str_sec = "\0t\0.bss",
+       .str_sec_size = sizeof("\0t\0.bss"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = ".bss",
+       .key_size = sizeof(int),
+       .value_size = 4,
+       .key_type_id = 0,
+       .value_type_id = 3,
+       .max_entries = 1,
+       .btf_load_err = true,
+       .err_str = "Linkage not supported",
+},
+{
+       .descr = "global data test #5, invalid var type",
+       .raw_types = {
+               /* static void t */
+               BTF_VAR_ENC(NAME_TBD, 0, 0),                    /* [1] */
+               /* .bss section */                              /* [2] */
+               BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
+               BTF_VAR_SECINFO_ENC(1, 0, 4),
+               BTF_END_RAW,
+       },
+       .str_sec = "\0t\0.bss",
+       .str_sec_size = sizeof("\0t\0.bss"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = ".bss",
+       .key_size = sizeof(int),
+       .value_size = 4,
+       .key_type_id = 0,
+       .value_type_id = 2,
+       .max_entries = 1,
+       .btf_load_err = true,
+       .err_str = "Invalid type_id",
+},
+{
+       .descr = "global data test #6, invalid var type (fwd type)",
+       .raw_types = {
+               /* union A */
+               BTF_TYPE_ENC(NAME_TBD,
+                            BTF_INFO_ENC(BTF_KIND_FWD, 1, 0), 0), /* [1] */
+               /* static union A t */
+               BTF_VAR_ENC(NAME_TBD, 1, 0),                    /* [2] */
+               /* .bss section */                              /* [3] */
+               BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
+               BTF_VAR_SECINFO_ENC(2, 0, 4),
+               BTF_END_RAW,
+       },
+       .str_sec = "\0A\0t\0.bss",
+       .str_sec_size = sizeof("\0A\0t\0.bss"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = ".bss",
+       .key_size = sizeof(int),
+       .value_size = 4,
+       .key_type_id = 0,
+       .value_type_id = 2,
+       .max_entries = 1,
+       .btf_load_err = true,
+       .err_str = "Invalid type",
+},
+{
+       .descr = "global data test #7, invalid var type (fwd type)",
+       .raw_types = {
+               /* union A */
+               BTF_TYPE_ENC(NAME_TBD,
+                            BTF_INFO_ENC(BTF_KIND_FWD, 1, 0), 0), /* [1] */
+               /* static union A t */
+               BTF_VAR_ENC(NAME_TBD, 1, 0),                    /* [2] */
+               /* .bss section */                              /* [3] */
+               BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
+               BTF_VAR_SECINFO_ENC(1, 0, 4),
+               BTF_END_RAW,
+       },
+       .str_sec = "\0A\0t\0.bss",
+       .str_sec_size = sizeof("\0A\0t\0.bss"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = ".bss",
+       .key_size = sizeof(int),
+       .value_size = 4,
+       .key_type_id = 0,
+       .value_type_id = 2,
+       .max_entries = 1,
+       .btf_load_err = true,
+       .err_str = "Invalid type",
+},
+{
+       .descr = "global data test #8, invalid var size",
+       .raw_types = {
+               /* int */
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
+               /* unsigned long long */
+               BTF_TYPE_INT_ENC(0, 0, 0, 64, 8),               /* [2] */
+               /* char */
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 8, 1),   /* [3] */
+               /* int[8] */
+               BTF_TYPE_ARRAY_ENC(1, 1, 8),                    /* [4] */
+               /* struct A { */                                /* [5] */
+               BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 4), 48),
+               BTF_MEMBER_ENC(NAME_TBD, 2, 0), /* unsigned long long m;*/
+               BTF_MEMBER_ENC(NAME_TBD, 1, 64),/* int n;               */
+               BTF_MEMBER_ENC(NAME_TBD, 3, 96),/* char o;              */
+               BTF_MEMBER_ENC(NAME_TBD, 4, 128),/* int p[8]            */
+               /* } */
+               /* static struct A t */
+               BTF_VAR_ENC(NAME_TBD, 5, 0),                    /* [6] */
+               /* .bss section */                              /* [7] */
+               BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 48),
+               BTF_VAR_SECINFO_ENC(6, 0, 47),
+               BTF_END_RAW,
+       },
+       .str_sec = "\0A\0m\0n\0o\0p\0t\0.bss",
+       .str_sec_size = sizeof("\0A\0m\0n\0o\0p\0t\0.bss"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = ".bss",
+       .key_size = sizeof(int),
+       .value_size = 48,
+       .key_type_id = 0,
+       .value_type_id = 7,
+       .max_entries = 1,
+       .btf_load_err = true,
+       .err_str = "Invalid size",
+},
+{
+       .descr = "global data test #9, invalid var size",
+       .raw_types = {
+               /* int */
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
+               /* unsigned long long */
+               BTF_TYPE_INT_ENC(0, 0, 0, 64, 8),               /* [2] */
+               /* char */
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 8, 1),   /* [3] */
+               /* int[8] */
+               BTF_TYPE_ARRAY_ENC(1, 1, 8),                    /* [4] */
+               /* struct A { */                                /* [5] */
+               BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 4), 48),
+               BTF_MEMBER_ENC(NAME_TBD, 2, 0), /* unsigned long long m;*/
+               BTF_MEMBER_ENC(NAME_TBD, 1, 64),/* int n;               */
+               BTF_MEMBER_ENC(NAME_TBD, 3, 96),/* char o;              */
+               BTF_MEMBER_ENC(NAME_TBD, 4, 128),/* int p[8]            */
+               /* } */
+               /* static struct A t */
+               BTF_VAR_ENC(NAME_TBD, 5, 0),                    /* [6] */
+               /* .bss section */                              /* [7] */
+               BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 46),
+               BTF_VAR_SECINFO_ENC(6, 0, 48),
+               BTF_END_RAW,
+       },
+       .str_sec = "\0A\0m\0n\0o\0p\0t\0.bss",
+       .str_sec_size = sizeof("\0A\0m\0n\0o\0p\0t\0.bss"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = ".bss",
+       .key_size = sizeof(int),
+       .value_size = 48,
+       .key_type_id = 0,
+       .value_type_id = 7,
+       .max_entries = 1,
+       .btf_load_err = true,
+       .err_str = "Invalid size",
+},
+{
+       .descr = "global data test #10, invalid var size",
+       .raw_types = {
+               /* int */
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
+               /* unsigned long long */
+               BTF_TYPE_INT_ENC(0, 0, 0, 64, 8),               /* [2] */
+               /* char */
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 8, 1),   /* [3] */
+               /* int[8] */
+               BTF_TYPE_ARRAY_ENC(1, 1, 8),                    /* [4] */
+               /* struct A { */                                /* [5] */
+               BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 4), 48),
+               BTF_MEMBER_ENC(NAME_TBD, 2, 0), /* unsigned long long m;*/
+               BTF_MEMBER_ENC(NAME_TBD, 1, 64),/* int n;               */
+               BTF_MEMBER_ENC(NAME_TBD, 3, 96),/* char o;              */
+               BTF_MEMBER_ENC(NAME_TBD, 4, 128),/* int p[8]            */
+               /* } */
+               /* static struct A t */
+               BTF_VAR_ENC(NAME_TBD, 5, 0),                    /* [6] */
+               /* .bss section */                              /* [7] */
+               BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 46),
+               BTF_VAR_SECINFO_ENC(6, 0, 46),
+               BTF_END_RAW,
+       },
+       .str_sec = "\0A\0m\0n\0o\0p\0t\0.bss",
+       .str_sec_size = sizeof("\0A\0m\0n\0o\0p\0t\0.bss"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = ".bss",
+       .key_size = sizeof(int),
+       .value_size = 48,
+       .key_type_id = 0,
+       .value_type_id = 7,
+       .max_entries = 1,
+       .btf_load_err = true,
+       .err_str = "Invalid size",
+},
+{
+       .descr = "global data test #11, multiple section members",
+       .raw_types = {
+               /* int */
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
+               /* unsigned long long */
+               BTF_TYPE_INT_ENC(0, 0, 0, 64, 8),               /* [2] */
+               /* char */
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 8, 1),   /* [3] */
+               /* int[8] */
+               BTF_TYPE_ARRAY_ENC(1, 1, 8),                    /* [4] */
+               /* struct A { */                                /* [5] */
+               BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 4), 48),
+               BTF_MEMBER_ENC(NAME_TBD, 2, 0), /* unsigned long long m;*/
+               BTF_MEMBER_ENC(NAME_TBD, 1, 64),/* int n;               */
+               BTF_MEMBER_ENC(NAME_TBD, 3, 96),/* char o;              */
+               BTF_MEMBER_ENC(NAME_TBD, 4, 128),/* int p[8]            */
+               /* } */
+               /* static struct A t */
+               BTF_VAR_ENC(NAME_TBD, 5, 0),                    /* [6] */
+               /* static int u */
+               BTF_VAR_ENC(NAME_TBD, 1, 0),                    /* [7] */
+               /* .bss section */                              /* [8] */
+               BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 2), 62),
+               BTF_VAR_SECINFO_ENC(6, 10, 48),
+               BTF_VAR_SECINFO_ENC(7, 58, 4),
+               BTF_END_RAW,
+       },
+       .str_sec = "\0A\0m\0n\0o\0p\0t\0u\0.bss",
+       .str_sec_size = sizeof("\0A\0m\0n\0o\0p\0t\0u\0.bss"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = ".bss",
+       .key_size = sizeof(int),
+       .value_size = 62,
+       .key_type_id = 0,
+       .value_type_id = 8,
+       .max_entries = 1,
+},
+{
+       .descr = "global data test #12, invalid offset",
+       .raw_types = {
+               /* int */
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
+               /* unsigned long long */
+               BTF_TYPE_INT_ENC(0, 0, 0, 64, 8),               /* [2] */
+               /* char */
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 8, 1),   /* [3] */
+               /* int[8] */
+               BTF_TYPE_ARRAY_ENC(1, 1, 8),                    /* [4] */
+               /* struct A { */                                /* [5] */
+               BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 4), 48),
+               BTF_MEMBER_ENC(NAME_TBD, 2, 0), /* unsigned long long m;*/
+               BTF_MEMBER_ENC(NAME_TBD, 1, 64),/* int n;               */
+               BTF_MEMBER_ENC(NAME_TBD, 3, 96),/* char o;              */
+               BTF_MEMBER_ENC(NAME_TBD, 4, 128),/* int p[8]            */
+               /* } */
+               /* static struct A t */
+               BTF_VAR_ENC(NAME_TBD, 5, 0),                    /* [6] */
+               /* static int u */
+               BTF_VAR_ENC(NAME_TBD, 1, 0),                    /* [7] */
+               /* .bss section */                              /* [8] */
+               BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 2), 62),
+               BTF_VAR_SECINFO_ENC(6, 10, 48),
+               BTF_VAR_SECINFO_ENC(7, 60, 4),
+               BTF_END_RAW,
+       },
+       .str_sec = "\0A\0m\0n\0o\0p\0t\0u\0.bss",
+       .str_sec_size = sizeof("\0A\0m\0n\0o\0p\0t\0u\0.bss"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = ".bss",
+       .key_size = sizeof(int),
+       .value_size = 62,
+       .key_type_id = 0,
+       .value_type_id = 8,
+       .max_entries = 1,
+       .btf_load_err = true,
+       .err_str = "Invalid offset+size",
+},
+{
+       .descr = "global data test #13, invalid offset",
+       .raw_types = {
+               /* int */
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
+               /* unsigned long long */
+               BTF_TYPE_INT_ENC(0, 0, 0, 64, 8),               /* [2] */
+               /* char */
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 8, 1),   /* [3] */
+               /* int[8] */
+               BTF_TYPE_ARRAY_ENC(1, 1, 8),                    /* [4] */
+               /* struct A { */                                /* [5] */
+               BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 4), 48),
+               BTF_MEMBER_ENC(NAME_TBD, 2, 0), /* unsigned long long m;*/
+               BTF_MEMBER_ENC(NAME_TBD, 1, 64),/* int n;               */
+               BTF_MEMBER_ENC(NAME_TBD, 3, 96),/* char o;              */
+               BTF_MEMBER_ENC(NAME_TBD, 4, 128),/* int p[8]            */
+               /* } */
+               /* static struct A t */
+               BTF_VAR_ENC(NAME_TBD, 5, 0),                    /* [6] */
+               /* static int u */
+               BTF_VAR_ENC(NAME_TBD, 1, 0),                    /* [7] */
+               /* .bss section */                              /* [8] */
+               BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 2), 62),
+               BTF_VAR_SECINFO_ENC(6, 10, 48),
+               BTF_VAR_SECINFO_ENC(7, 12, 4),
+               BTF_END_RAW,
+       },
+       .str_sec = "\0A\0m\0n\0o\0p\0t\0u\0.bss",
+       .str_sec_size = sizeof("\0A\0m\0n\0o\0p\0t\0u\0.bss"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = ".bss",
+       .key_size = sizeof(int),
+       .value_size = 62,
+       .key_type_id = 0,
+       .value_type_id = 8,
+       .max_entries = 1,
+       .btf_load_err = true,
+       .err_str = "Invalid offset",
+},
+{
+       .descr = "global data test #14, invalid offset",
+       .raw_types = {
+               /* int */
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
+               /* unsigned long long */
+               BTF_TYPE_INT_ENC(0, 0, 0, 64, 8),               /* [2] */
+               /* char */
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 8, 1),   /* [3] */
+               /* int[8] */
+               BTF_TYPE_ARRAY_ENC(1, 1, 8),                    /* [4] */
+               /* struct A { */                                /* [5] */
+               BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 4), 48),
+               BTF_MEMBER_ENC(NAME_TBD, 2, 0), /* unsigned long long m;*/
+               BTF_MEMBER_ENC(NAME_TBD, 1, 64),/* int n;               */
+               BTF_MEMBER_ENC(NAME_TBD, 3, 96),/* char o;              */
+               BTF_MEMBER_ENC(NAME_TBD, 4, 128),/* int p[8]            */
+               /* } */
+               /* static struct A t */
+               BTF_VAR_ENC(NAME_TBD, 5, 0),                    /* [6] */
+               /* static int u */
+               BTF_VAR_ENC(NAME_TBD, 1, 0),                    /* [7] */
+               /* .bss section */                              /* [8] */
+               BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 2), 62),
+               BTF_VAR_SECINFO_ENC(7, 58, 4),
+               BTF_VAR_SECINFO_ENC(6, 10, 48),
+               BTF_END_RAW,
+       },
+       .str_sec = "\0A\0m\0n\0o\0p\0t\0u\0.bss",
+       .str_sec_size = sizeof("\0A\0m\0n\0o\0p\0t\0u\0.bss"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = ".bss",
+       .key_size = sizeof(int),
+       .value_size = 62,
+       .key_type_id = 0,
+       .value_type_id = 8,
+       .max_entries = 1,
+       .btf_load_err = true,
+       .err_str = "Invalid offset",
+},
+{
+       .descr = "global data test #15, not var kind",
+       .raw_types = {
+               /* int */
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
+               BTF_VAR_ENC(NAME_TBD, 1, 0),                    /* [2] */
+               /* .bss section */                              /* [3] */
+               BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
+               BTF_VAR_SECINFO_ENC(1, 0, 4),
+               BTF_END_RAW,
+       },
+       .str_sec = "\0A\0t\0.bss",
+       .str_sec_size = sizeof("\0A\0t\0.bss"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = ".bss",
+       .key_size = sizeof(int),
+       .value_size = 4,
+       .key_type_id = 0,
+       .value_type_id = 3,
+       .max_entries = 1,
+       .btf_load_err = true,
+       .err_str = "Not a VAR kind member",
+},
+{
+       .descr = "global data test #16, invalid var referencing sec",
+       .raw_types = {
+               /* int */
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
+               BTF_VAR_ENC(NAME_TBD, 5, 0),                    /* [2] */
+               BTF_VAR_ENC(NAME_TBD, 2, 0),                    /* [3] */
+               /* a section */                                 /* [4] */
+               BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
+               BTF_VAR_SECINFO_ENC(3, 0, 4),
+               /* a section */                                 /* [5] */
+               BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
+               BTF_VAR_SECINFO_ENC(6, 0, 4),
+               BTF_VAR_ENC(NAME_TBD, 1, 0),                    /* [6] */
+               BTF_END_RAW,
+       },
+       .str_sec = "\0A\0t\0s\0a\0a",
+       .str_sec_size = sizeof("\0A\0t\0s\0a\0a"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = ".bss",
+       .key_size = sizeof(int),
+       .value_size = 4,
+       .key_type_id = 0,
+       .value_type_id = 4,
+       .max_entries = 1,
+       .btf_load_err = true,
+       .err_str = "Invalid type_id",
+},
+{
+       .descr = "global data test #17, invalid var referencing var",
+       .raw_types = {
+               /* int */
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
+               BTF_VAR_ENC(NAME_TBD, 1, 0),                    /* [2] */
+               BTF_VAR_ENC(NAME_TBD, 2, 0),                    /* [3] */
+               /* a section */                                 /* [4] */
+               BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
+               BTF_VAR_SECINFO_ENC(3, 0, 4),
+               BTF_END_RAW,
+       },
+       .str_sec = "\0A\0t\0s\0a\0a",
+       .str_sec_size = sizeof("\0A\0t\0s\0a\0a"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = ".bss",
+       .key_size = sizeof(int),
+       .value_size = 4,
+       .key_type_id = 0,
+       .value_type_id = 4,
+       .max_entries = 1,
+       .btf_load_err = true,
+       .err_str = "Invalid type_id",
+},
+{
+       .descr = "global data test #18, invalid var loop",
+       .raw_types = {
+               /* int */
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
+               BTF_VAR_ENC(NAME_TBD, 2, 0),                    /* [2] */
+               /* .bss section */                              /* [3] */
+               BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
+               BTF_VAR_SECINFO_ENC(2, 0, 4),
+               BTF_END_RAW,
+       },
+       .str_sec = "\0A\0t\0aaa",
+       .str_sec_size = sizeof("\0A\0t\0aaa"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = ".bss",
+       .key_size = sizeof(int),
+       .value_size = 4,
+       .key_type_id = 0,
+       .value_type_id = 4,
+       .max_entries = 1,
+       .btf_load_err = true,
+       .err_str = "Invalid type_id",
+},
+{
+       .descr = "global data test #19, invalid var referencing var",
+       .raw_types = {
+               /* int */
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
+               BTF_VAR_ENC(NAME_TBD, 3, 0),                    /* [2] */
+               BTF_VAR_ENC(NAME_TBD, 1, 0),                    /* [3] */
+               BTF_END_RAW,
+       },
+       .str_sec = "\0A\0t\0s\0a\0a",
+       .str_sec_size = sizeof("\0A\0t\0s\0a\0a"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = ".bss",
+       .key_size = sizeof(int),
+       .value_size = 4,
+       .key_type_id = 0,
+       .value_type_id = 4,
+       .max_entries = 1,
+       .btf_load_err = true,
+       .err_str = "Invalid type_id",
+},
+{
+       .descr = "global data test #20, invalid ptr referencing var",
+       .raw_types = {
+               /* int */
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
+               /* PTR type_id=3        */                      /* [2] */
+               BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 3),
+               BTF_VAR_ENC(NAME_TBD, 1, 0),                    /* [3] */
+               BTF_END_RAW,
+       },
+       .str_sec = "\0A\0t\0s\0a\0a",
+       .str_sec_size = sizeof("\0A\0t\0s\0a\0a"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = ".bss",
+       .key_size = sizeof(int),
+       .value_size = 4,
+       .key_type_id = 0,
+       .value_type_id = 4,
+       .max_entries = 1,
+       .btf_load_err = true,
+       .err_str = "Invalid type_id",
+},
+{
+       .descr = "global data test #21, var included in struct",
+       .raw_types = {
+               /* int */
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
+               /* struct A { */                                /* [2] */
+               BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), sizeof(int) * 2),
+               BTF_MEMBER_ENC(NAME_TBD, 1, 0), /* int m; */
+               BTF_MEMBER_ENC(NAME_TBD, 3, 32),/* VAR type_id=3; */
+               /* } */
+               BTF_VAR_ENC(NAME_TBD, 1, 0),                    /* [3] */
+               BTF_END_RAW,
+       },
+       .str_sec = "\0A\0t\0s\0a\0a",
+       .str_sec_size = sizeof("\0A\0t\0s\0a\0a"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = ".bss",
+       .key_size = sizeof(int),
+       .value_size = 4,
+       .key_type_id = 0,
+       .value_type_id = 4,
+       .max_entries = 1,
+       .btf_load_err = true,
+       .err_str = "Invalid member",
+},
+{
+       .descr = "global data test #22, array of var",
+       .raw_types = {
+               /* int */
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
+               BTF_TYPE_ARRAY_ENC(3, 1, 4),                    /* [2] */
+               BTF_VAR_ENC(NAME_TBD, 1, 0),                    /* [3] */
+               BTF_END_RAW,
+       },
+       .str_sec = "\0A\0t\0s\0a\0a",
+       .str_sec_size = sizeof("\0A\0t\0s\0a\0a"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = ".bss",
+       .key_size = sizeof(int),
+       .value_size = 4,
+       .key_type_id = 0,
+       .value_type_id = 4,
+       .max_entries = 1,
+       .btf_load_err = true,
+       .err_str = "Invalid elem",
+},
 /* Test member exceeds the size of struct.
  *
  * struct A {
@@ -3677,6 +4338,7 @@ struct pprint_mapv {
        } aenum;
        uint32_t ui32b;
        uint32_t bits2c:2;
+       uint8_t si8_4[2][2];
 };
 
 #ifdef __SIZEOF_INT128__
@@ -3729,7 +4391,7 @@ static struct btf_raw_test pprint_test_template[] = {
                BTF_ENUM_ENC(NAME_TBD, 2),
                BTF_ENUM_ENC(NAME_TBD, 3),
                /* struct pprint_mapv */                /* [16] */
-               BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 10), 40),
+               BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 11), 40),
                BTF_MEMBER_ENC(NAME_TBD, 11, 0),        /* uint32_t ui32 */
                BTF_MEMBER_ENC(NAME_TBD, 10, 32),       /* uint16_t ui16 */
                BTF_MEMBER_ENC(NAME_TBD, 12, 64),       /* int32_t si32 */
@@ -3740,9 +4402,12 @@ static struct btf_raw_test pprint_test_template[] = {
                BTF_MEMBER_ENC(NAME_TBD, 15, 192),      /* aenum */
                BTF_MEMBER_ENC(NAME_TBD, 11, 224),      /* uint32_t ui32b */
                BTF_MEMBER_ENC(NAME_TBD, 6, 256),       /* bits2c */
+               BTF_MEMBER_ENC(NAME_TBD, 17, 264),      /* si8_4 */
+               BTF_TYPE_ARRAY_ENC(18, 1, 2),           /* [17] */
+               BTF_TYPE_ARRAY_ENC(1, 1, 2),            /* [18] */
                BTF_END_RAW,
        },
-       BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0ui32b\0bits2c"),
+       BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0ui32b\0bits2c\0si8_4"),
        .key_size = sizeof(unsigned int),
        .value_size = sizeof(struct pprint_mapv),
        .key_type_id = 3,       /* unsigned int */
@@ -3791,7 +4456,7 @@ static struct btf_raw_test pprint_test_template[] = {
                BTF_ENUM_ENC(NAME_TBD, 2),
                BTF_ENUM_ENC(NAME_TBD, 3),
                /* struct pprint_mapv */                /* [16] */
-               BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 10), 40),
+               BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 11), 40),
                BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 0)),  /* uint32_t ui32 */
                BTF_MEMBER_ENC(NAME_TBD, 10, BTF_MEMBER_OFFSET(0, 32)), /* uint16_t ui16 */
                BTF_MEMBER_ENC(NAME_TBD, 12, BTF_MEMBER_OFFSET(0, 64)), /* int32_t si32 */
@@ -3802,9 +4467,12 @@ static struct btf_raw_test pprint_test_template[] = {
                BTF_MEMBER_ENC(NAME_TBD, 15, BTF_MEMBER_OFFSET(0, 192)),        /* aenum */
                BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 224)),        /* uint32_t ui32b */
                BTF_MEMBER_ENC(NAME_TBD, 6, BTF_MEMBER_OFFSET(2, 256)), /* bits2c */
+               BTF_MEMBER_ENC(NAME_TBD, 17, 264),      /* si8_4 */
+               BTF_TYPE_ARRAY_ENC(18, 1, 2),           /* [17] */
+               BTF_TYPE_ARRAY_ENC(1, 1, 2),            /* [18] */
                BTF_END_RAW,
        },
-       BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0ui32b\0bits2c"),
+       BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0ui32b\0bits2c\0si8_4"),
        .key_size = sizeof(unsigned int),
        .value_size = sizeof(struct pprint_mapv),
        .key_type_id = 3,       /* unsigned int */
@@ -3855,7 +4523,7 @@ static struct btf_raw_test pprint_test_template[] = {
                BTF_ENUM_ENC(NAME_TBD, 2),
                BTF_ENUM_ENC(NAME_TBD, 3),
                /* struct pprint_mapv */                /* [16] */
-               BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 10), 40),
+               BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 11), 40),
                BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 0)),  /* uint32_t ui32 */
                BTF_MEMBER_ENC(NAME_TBD, 10, BTF_MEMBER_OFFSET(0, 32)), /* uint16_t ui16 */
                BTF_MEMBER_ENC(NAME_TBD, 12, BTF_MEMBER_OFFSET(0, 64)), /* int32_t si32 */
@@ -3866,13 +4534,16 @@ static struct btf_raw_test pprint_test_template[] = {
                BTF_MEMBER_ENC(NAME_TBD, 15, BTF_MEMBER_OFFSET(0, 192)),        /* aenum */
                BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 224)),        /* uint32_t ui32b */
                BTF_MEMBER_ENC(NAME_TBD, 17, BTF_MEMBER_OFFSET(2, 256)),        /* bits2c */
+               BTF_MEMBER_ENC(NAME_TBD, 20, BTF_MEMBER_OFFSET(0, 264)),        /* si8_4 */
                /* typedef unsigned int ___int */       /* [17] */
                BTF_TYPEDEF_ENC(NAME_TBD, 18),
                BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_VOLATILE, 0, 0), 6),      /* [18] */
                BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 15),        /* [19] */
+               BTF_TYPE_ARRAY_ENC(21, 1, 2),                                   /* [20] */
+               BTF_TYPE_ARRAY_ENC(1, 1, 2),                                    /* [21] */
                BTF_END_RAW,
        },
-       BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0ui32b\0bits2c\0___int"),
+       BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0ui32b\0bits2c\0___int\0si8_4"),
        .key_size = sizeof(unsigned int),
        .value_size = sizeof(struct pprint_mapv),
        .key_type_id = 3,       /* unsigned int */
@@ -4007,6 +4678,10 @@ static void set_pprint_mapv(enum pprint_mapv_kind_t mapv_kind,
                        v->aenum = i & 0x03;
                        v->ui32b = 4;
                        v->bits2c = 1;
+                       v->si8_4[0][0] = (cpu + i) & 0xff;
+                       v->si8_4[0][1] = (cpu + i + 1) & 0xff;
+                       v->si8_4[1][0] = (cpu + i + 2) & 0xff;
+                       v->si8_4[1][1] = (cpu + i + 3) & 0xff;
                        v = (void *)v + rounded_value_size;
                }
        }
@@ -4040,7 +4715,7 @@ ssize_t get_pprint_expected_line(enum pprint_mapv_kind_t mapv_kind,
                nexpected_line = snprintf(expected_line, line_size,
                                          "%s%u: {%u,0,%d,0x%x,0x%x,0x%x,"
                                          "{%lu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s,"
-                                         "%u,0x%x}\n",
+                                         "%u,0x%x,[[%d,%d],[%d,%d]]}\n",
                                          percpu_map ? "\tcpu" : "",
                                          percpu_map ? cpu : next_key,
                                          v->ui32, v->si32,
@@ -4054,7 +4729,9 @@ ssize_t get_pprint_expected_line(enum pprint_mapv_kind_t mapv_kind,
                                          v->ui8a[6], v->ui8a[7],
                                          pprint_enum_str[v->aenum],
                                          v->ui32b,
-                                         v->bits2c);
+                                         v->bits2c,
+                                         v->si8_4[0][0], v->si8_4[0][1],
+                                         v->si8_4[1][0], v->si8_4[1][1]);
        }
 
 #ifdef __SIZEOF_INT128__
@@ -5776,6 +6453,53 @@ const struct btf_dedup_test dedup_tests[] = {
                .dedup_table_size = 1, /* force hash collisions */
        },
 },
+{
+       .descr = "dedup: void equiv check",
+       /*
+        * // CU 1:
+        * struct s {
+        *      struct {} *x;
+        * };
+        * // CU 2:
+        * struct s {
+        *      int *x;
+        * };
+        */
+       .input = {
+               .raw_types = {
+                       /* CU 1 */
+                       BTF_STRUCT_ENC(0, 0, 1),                                /* [1] struct {}  */
+                       BTF_PTR_ENC(1),                                         /* [2] ptr -> [1] */
+                       BTF_STRUCT_ENC(NAME_NTH(1), 1, 8),                      /* [3] struct s   */
+                               BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
+                       /* CU 2 */
+                       BTF_PTR_ENC(0),                                         /* [4] ptr -> void */
+                       BTF_STRUCT_ENC(NAME_NTH(1), 1, 8),                      /* [5] struct s   */
+                               BTF_MEMBER_ENC(NAME_NTH(2), 4, 0),
+                       BTF_END_RAW,
+               },
+               BTF_STR_SEC("\0s\0x"),
+       },
+       .expect = {
+               .raw_types = {
+                       /* CU 1 */
+                       BTF_STRUCT_ENC(0, 0, 1),                                /* [1] struct {}  */
+                       BTF_PTR_ENC(1),                                         /* [2] ptr -> [1] */
+                       BTF_STRUCT_ENC(NAME_NTH(1), 1, 8),                      /* [3] struct s   */
+                               BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
+                       /* CU 2 */
+                       BTF_PTR_ENC(0),                                         /* [4] ptr -> void */
+                       BTF_STRUCT_ENC(NAME_NTH(1), 1, 8),                      /* [5] struct s   */
+                               BTF_MEMBER_ENC(NAME_NTH(2), 4, 0),
+                       BTF_END_RAW,
+               },
+               BTF_STR_SEC("\0s\0x"),
+       },
+       .opts = {
+               .dont_resolve_fwds = false,
+               .dedup_table_size = 1, /* force hash collisions */
+       },
+},
 {
        .descr = "dedup: all possible kinds (no duplicates)",
        .input = {
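
Note: the test_btf.c hunks above extend the pretty-print map value with a
two-dimensional si8_4[2][2] member, rendered as "[[%d,%d],[%d,%d]]", and add
a dedup regression test checking that a pointer to an empty struct is not
collapsed into a pointer to void. A minimal standalone sketch of the expected
array rendering (plain C, not part of the patch):

	#include <stdio.h>

	int main(void)
	{
		signed char si8_4[2][2] = { { 1, 2 }, { 3, 4 } };
		char frag[64];

		snprintf(frag, sizeof(frag), "[[%d,%d],[%d,%d]]",
			 si8_4[0][0], si8_4[0][1], si8_4[1][0], si8_4[1][1]);
		puts(frag);	/* prints [[1,2],[3,4]] */
		return 0;
	}
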
index 84bea3985d64fb154ac6bdcac4a31e166e175834..a7f95106119fc8a411569345e66fc2bcc8f2891d 100755 (executable)
@@ -1055,7 +1055,7 @@ try:
 
     start_test("Test if netdev removal waits for translation...")
     delay_msec = 500
-    sim.dfs["bpf_bind_verifier_delay"] = delay_msec
+    sim.dfs["sdev/bpf_bind_verifier_delay"] = delay_msec
     start = time.time()
     cmd_line = "tc filter add dev %s ingress bpf %s da skip_sw" % \
                (sim['ifname'], obj)
index 5d10aee9e27744f75db45baf5889f56381ce287c..bf5c90998916a60961ede884b9ea2d5393813cdd 100644 (file)
@@ -9,6 +9,7 @@
 
 int error_cnt, pass_cnt;
 bool jit_enabled;
+bool verifier_stats = false;
 
 struct ipv4_packet pkt_v4 = {
        .eth.h_proto = __bpf_constant_htons(ETH_P_IP),
@@ -162,12 +163,15 @@ void *spin_lock_thread(void *arg)
 #include <prog_tests/tests.h>
 #undef DECLARE
 
-int main(void)
+int main(int ac, char **av)
 {
        srand(time(NULL));
 
        jit_enabled = is_jit_enabled();
 
+       if (ac == 2 && strcmp(av[1], "-s") == 0)
+               verifier_stats = true;
+
 #define CALL
 #include <prog_tests/tests.h>
 #undef CALL
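
Note: the -s switch above only sets the verifier_stats global; its consumer
is outside this excerpt. A hedged sketch of how a test might use it (the
log_buf variable and the log level value are assumptions, not shown in the
patch):

	/* Fragment: raise the verifier log level when -s was given. */
	fd_prog = bpf_verify_program(BPF_PROG_TYPE_SCHED_CLS, insns, insn_cnt,
				     0, "GPL", 0, log_buf, sizeof(log_buf),
				     verifier_stats ? 4 : 0);
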
index 51a07367cd433f5ae9055083b4d1a7e6868fdae0..f095e1d4c657f4ee56a2198ce26561274d9b7115 100644 (file)
@@ -40,6 +40,7 @@ typedef __u16 __sum16;
 
 extern int error_cnt, pass_cnt;
 extern bool jit_enabled;
+extern bool verifier_stats;
 
 #define MAGIC_BYTES 123
 
index c805adb88f3ac830044ec8426e4a4f5e9ef77fbd..d4d8d5d3b06e178ec7ebdd6ac6ebbe99c5d96c8e 100755 (executable)
@@ -15,6 +15,12 @@ readonly ns2_v4=192.168.1.2
 readonly ns1_v6=fd::1
 readonly ns2_v6=fd::2
 
+# Must match port used by bpf program
+readonly udpport=5555
+# MPLSoverUDP
+readonly mplsudpport=6635
+readonly mplsproto=137
+
 readonly infile="$(mktemp)"
 readonly outfile="$(mktemp)"
 
@@ -38,8 +44,8 @@ setup() {
        # clamp route to reserve room for tunnel headers
        ip -netns "${ns1}" -4 route flush table main
        ip -netns "${ns1}" -6 route flush table main
-       ip -netns "${ns1}" -4 route add "${ns2_v4}" mtu 1476 dev veth1
-       ip -netns "${ns1}" -6 route add "${ns2_v6}" mtu 1456 dev veth1
+       ip -netns "${ns1}" -4 route add "${ns2_v4}" mtu 1458 dev veth1
+       ip -netns "${ns1}" -6 route add "${ns2_v6}" mtu 1438 dev veth1
 
        sleep 1
 
@@ -86,30 +92,44 @@ set -e
 # no arguments: automated test, run all
 if [[ "$#" -eq "0" ]]; then
        echo "ipip"
-       $0 ipv4 ipip 100
+       $0 ipv4 ipip none 100
 
        echo "ip6ip6"
-       $0 ipv6 ip6tnl 100
+       $0 ipv6 ip6tnl none 100
+
+       for mac in none mpls eth ; do
+               echo "ip gre $mac"
+               $0 ipv4 gre $mac 100
+
+               echo "ip6 gre $mac"
+               $0 ipv6 ip6gre $mac 100
+
+               echo "ip gre $mac gso"
+               $0 ipv4 gre $mac 2000
 
-       echo "ip gre"
-       $0 ipv4 gre 100
+               echo "ip6 gre $mac gso"
+               $0 ipv6 ip6gre $mac 2000
 
-       echo "ip6 gre"
-       $0 ipv6 ip6gre 100
+               echo "ip udp $mac"
+               $0 ipv4 udp $mac 100
 
-       echo "ip gre gso"
-       $0 ipv4 gre 2000
+               echo "ip6 udp $mac"
+               $0 ipv6 ip6udp $mac 100
 
-       echo "ip6 gre gso"
-       $0 ipv6 ip6gre 2000
+               echo "ip udp $mac gso"
+               $0 ipv4 udp $mac 2000
+
+               echo "ip6 udp $mac gso"
+               $0 ipv6 ip6udp $mac 2000
+       done
 
        echo "OK. All tests passed"
        exit 0
 fi
 
-if [[ "$#" -ne "3" ]]; then
+if [[ "$#" -ne "4" ]]; then
        echo "Usage: $0"
-       echo "   or: $0 <ipv4|ipv6> <tuntype> <data_len>"
+       echo "   or: $0 <ipv4|ipv6> <tuntype> <none|mpls|eth> <data_len>"
        exit 1
 fi
 
@@ -117,12 +137,24 @@ case "$1" in
 "ipv4")
        readonly addr1="${ns1_v4}"
        readonly addr2="${ns2_v4}"
-       readonly netcat_opt=-4
+       readonly ipproto=4
+       readonly netcat_opt=-${ipproto}
+       readonly foumod=fou
+       readonly foutype=ipip
+       readonly fouproto=4
+       readonly fouproto_mpls=${mplsproto}
+       readonly gretaptype=gretap
        ;;
 "ipv6")
        readonly addr1="${ns1_v6}"
        readonly addr2="${ns2_v6}"
-       readonly netcat_opt=-6
+       readonly ipproto=6
+       readonly netcat_opt=-${ipproto}
+       readonly foumod=fou6
+       readonly foutype=ip6tnl
+       readonly fouproto="41 -6"
+       readonly fouproto_mpls="${mplsproto} -6"
+       readonly gretaptype=ip6gretap
        ;;
 *)
        echo "unknown arg: $1"
@@ -131,9 +163,10 @@ case "$1" in
 esac
 
 readonly tuntype=$2
-readonly datalen=$3
+readonly mac=$3
+readonly datalen=$4
 
-echo "encap ${addr1} to ${addr2}, type ${tuntype}, len ${datalen}"
+echo "encap ${addr1} to ${addr2}, type ${tuntype}, mac ${mac} len ${datalen}"
 
 trap cleanup EXIT
 
@@ -150,16 +183,63 @@ verify_data
 ip netns exec "${ns1}" tc qdisc add dev veth1 clsact
 ip netns exec "${ns1}" tc filter add dev veth1 egress \
        bpf direct-action object-file ./test_tc_tunnel.o \
-       section "encap_${tuntype}"
+       section "encap_${tuntype}_${mac}"
 echo "test bpf encap without decap (expect failure)"
 server_listen
 ! client_connect
 
+if [[ "$tuntype" =~ "udp" ]]; then
+       # Set up fou tunnel.
+       ttype="${foutype}"
+       targs="encap fou encap-sport auto encap-dport $udpport"
+       # fou may be a module; allow this to fail.
+       modprobe "${foumod}" ||true
+       if [[ "$mac" == "mpls" ]]; then
+               dport=${mplsudpport}
+               dproto=${fouproto_mpls}
+               tmode="mode any ttl 255"
+       else
+               dport=${udpport}
+               dproto=${fouproto}
+       fi
+       ip netns exec "${ns2}" ip fou add port $dport ipproto ${dproto}
+       targs="encap fou encap-sport auto encap-dport $dport"
+elif [[ "$tuntype" =~ "gre" && "$mac" == "eth" ]]; then
+       ttype=$gretaptype
+else
+       ttype=$tuntype
+       targs=""
+fi
+
 # serverside, insert decap module
 # server is still running
 # client can connect again
-ip netns exec "${ns2}" ip link add dev testtun0 type "${tuntype}" \
-       remote "${addr1}" local "${addr2}"
+ip netns exec "${ns2}" ip link add name testtun0 type "${ttype}" \
+       ${tmode} remote "${addr1}" local "${addr2}" $targs
+
+expect_tun_fail=0
+
+if [[ "$tuntype" == "ip6udp" && "$mac" == "mpls" ]]; then
+       # No support for MPLS IPv6 fou tunnel; expect failure.
+       expect_tun_fail=1
+elif [[ "$tuntype" =~ "udp" && "$mac" == "eth" ]]; then
+       # No support for TEB fou tunnel; expect failure.
+       expect_tun_fail=1
+elif [[ "$tuntype" =~ "gre" && "$mac" == "eth" ]]; then
+       # Share ethernet address between tunnel/veth2 so L2 decap works.
+       ethaddr=$(ip netns exec "${ns2}" ip link show veth2 | \
+                 awk '/ether/ { print $2 }')
+       ip netns exec "${ns2}" ip link set testtun0 address $ethaddr
+elif [[ "$mac" == "mpls" ]]; then
+       modprobe mpls_iptunnel || true
+       modprobe mpls_gso || true
+       ip netns exec "${ns2}" sysctl -qw net.mpls.platform_labels=65536
+       ip netns exec "${ns2}" ip -f mpls route add 1000 dev lo
+       ip netns exec "${ns2}" ip link set lo up
+       ip netns exec "${ns2}" sysctl -qw net.mpls.conf.testtun0.input=1
+       ip netns exec "${ns2}" sysctl -qw net.ipv4.conf.lo.rp_filter=0
+fi
+
 # Because packets are decapped by the tunnel they arrive on testtun0 from
 # the IP stack perspective.  Ensure reverse path filtering is disabled
 # otherwise we drop the TCP SYN as arriving on testtun0 instead of the
@@ -169,16 +249,22 @@ ip netns exec "${ns2}" sysctl -qw net.ipv4.conf.all.rp_filter=0
 # selected as the max of the "all" and device-specific values.
 ip netns exec "${ns2}" sysctl -qw net.ipv4.conf.testtun0.rp_filter=0
 ip netns exec "${ns2}" ip link set dev testtun0 up
-echo "test bpf encap with tunnel device decap"
-client_connect
-verify_data
+if [[ "$expect_tun_fail" == 1 ]]; then
+       # This tunnel mode is not supported, so we expect failure.
+       echo "test bpf encap with tunnel device decap (expect failure)"
+       ! client_connect
+else
+       echo "test bpf encap with tunnel device decap"
+       client_connect
+       verify_data
+       server_listen
+fi
 
 # serverside, use BPF for decap
 ip netns exec "${ns2}" ip link del dev testtun0
 ip netns exec "${ns2}" tc qdisc add dev veth2 clsact
 ip netns exec "${ns2}" tc filter add dev veth2 ingress \
        bpf direct-action object-file ./test_tc_tunnel.o section decap
-server_listen
 echo "test bpf encap with bpf decap"
 client_connect
 verify_data
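
Note: with the changes above the script sweeps tuntype x mac combinations:
UDP encap is decapped by a fou/fou6 device bound to the matching port and
protocol, "gre eth" uses gretap/ip6gretap with the tunnel sharing veth2's MAC
so L2 decap works, and combinations the kernel does not support (fou with an
ethernet inner header, IPv6 fou carrying MPLS) are asserted to fail. A single
case can be run directly, e.g. "./test_tc_tunnel.sh ipv4 udp mpls 100".
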
index 19b5d03acc2a83d8425f5356dc53b2165ee96a1a..e2ebcaddbe7899eae4ffcd11473717b1bc15b545 100644 (file)
@@ -50,8 +50,9 @@
 #include "../../../include/linux/filter.h"
 
 #define MAX_INSNS      BPF_MAXINSNS
+#define MAX_TEST_INSNS 1000000
 #define MAX_FIXUPS     8
-#define MAX_NR_MAPS    14
+#define MAX_NR_MAPS    16
 #define MAX_TEST_RUNS  8
 #define POINTER_VALUE  0xcafe4all
 #define TEST_DATA_LEN  64
@@ -66,6 +67,7 @@ static int skips;
 struct bpf_test {
        const char *descr;
        struct bpf_insn insns[MAX_INSNS];
+       struct bpf_insn *fill_insns;
        int fixup_map_hash_8b[MAX_FIXUPS];
        int fixup_map_hash_48b[MAX_FIXUPS];
        int fixup_map_hash_16b[MAX_FIXUPS];
@@ -80,9 +82,13 @@ struct bpf_test {
        int fixup_cgroup_storage[MAX_FIXUPS];
        int fixup_percpu_cgroup_storage[MAX_FIXUPS];
        int fixup_map_spin_lock[MAX_FIXUPS];
+       int fixup_map_array_ro[MAX_FIXUPS];
+       int fixup_map_array_wo[MAX_FIXUPS];
+       int fixup_map_array_small[MAX_FIXUPS];
        const char *errstr;
        const char *errstr_unpriv;
        uint32_t retval, retval_unpriv, insn_processed;
+       int prog_len;
        enum {
                UNDEF,
                ACCEPT,
@@ -119,10 +125,11 @@ struct other_val {
 
 static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
 {
-       /* test: {skb->data[0], vlan_push} x 68 + {skb->data[0], vlan_pop} x 68 */
+       /* test: {skb->data[0], vlan_push} x 51 + {skb->data[0], vlan_pop} x 51 */
 #define PUSH_CNT 51
-       unsigned int len = BPF_MAXINSNS;
-       struct bpf_insn *insn = self->insns;
+       /* jump range is limited to 16 bits; the PUSH_CNT ld_abs insns need room */
+       unsigned int len = (1 << 15) - PUSH_CNT * 2 * 5 * 6;
+       struct bpf_insn *insn = self->fill_insns;
        int i = 0, j, k = 0;
 
        insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
@@ -156,12 +163,14 @@ static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
        for (; i < len - 1; i++)
                insn[i] = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 0xbef);
        insn[len - 1] = BPF_EXIT_INSN();
+       self->prog_len = len;
 }
 
 static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)
 {
-       struct bpf_insn *insn = self->insns;
-       unsigned int len = BPF_MAXINSNS;
+       struct bpf_insn *insn = self->fill_insns;
+       /* jump range is limited to 16 bits; every ld_abs is replaced by 6 insns */
+       unsigned int len = (1 << 15) / 6;
        int i = 0;
 
        insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
@@ -171,11 +180,12 @@ static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)
        while (i < len - 1)
                insn[i++] = BPF_LD_ABS(BPF_B, 1);
        insn[i] = BPF_EXIT_INSN();
+       self->prog_len = i + 1;
 }
 
 static void bpf_fill_rand_ld_dw(struct bpf_test *self)
 {
-       struct bpf_insn *insn = self->insns;
+       struct bpf_insn *insn = self->fill_insns;
        uint64_t res = 0;
        int i = 0;
 
@@ -193,6 +203,7 @@ static void bpf_fill_rand_ld_dw(struct bpf_test *self)
        insn[i++] = BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32);
        insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
        insn[i] = BPF_EXIT_INSN();
+       self->prog_len = i + 1;
        res ^= (res >> 32);
        self->retval = (uint32_t)res;
 }
@@ -277,13 +288,15 @@ static bool skip_unsupported_map(enum bpf_map_type map_type)
        return false;
 }
 
-static int create_map(uint32_t type, uint32_t size_key,
-                     uint32_t size_value, uint32_t max_elem)
+static int __create_map(uint32_t type, uint32_t size_key,
+                       uint32_t size_value, uint32_t max_elem,
+                       uint32_t extra_flags)
 {
        int fd;
 
        fd = bpf_create_map(type, size_key, size_value, max_elem,
-                           type == BPF_MAP_TYPE_HASH ? BPF_F_NO_PREALLOC : 0);
+                           (type == BPF_MAP_TYPE_HASH ?
+                            BPF_F_NO_PREALLOC : 0) | extra_flags);
        if (fd < 0) {
                if (skip_unsupported_map(type))
                        return -1;
@@ -293,6 +306,12 @@ static int create_map(uint32_t type, uint32_t size_key,
        return fd;
 }
 
+static int create_map(uint32_t type, uint32_t size_key,
+                     uint32_t size_value, uint32_t max_elem)
+{
+       return __create_map(type, size_key, size_value, max_elem, 0);
+}
+
 static void update_map(int fd, int index)
 {
        struct test_val value = {
@@ -519,9 +538,14 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
        int *fixup_cgroup_storage = test->fixup_cgroup_storage;
        int *fixup_percpu_cgroup_storage = test->fixup_percpu_cgroup_storage;
        int *fixup_map_spin_lock = test->fixup_map_spin_lock;
+       int *fixup_map_array_ro = test->fixup_map_array_ro;
+       int *fixup_map_array_wo = test->fixup_map_array_wo;
+       int *fixup_map_array_small = test->fixup_map_array_small;
 
-       if (test->fill_helper)
+       if (test->fill_helper) {
+               test->fill_insns = calloc(MAX_TEST_INSNS, sizeof(struct bpf_insn));
                test->fill_helper(test);
+       }
 
        /* Allocating HTs with 1 elem is fine here, since we only test
         * for verifier and not do a runtime lookup, so the only thing
@@ -642,6 +666,35 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
                        fixup_map_spin_lock++;
                } while (*fixup_map_spin_lock);
        }
+       if (*fixup_map_array_ro) {
+               map_fds[14] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
+                                          sizeof(struct test_val), 1,
+                                          BPF_F_RDONLY_PROG);
+               update_map(map_fds[14], 0);
+               do {
+                       prog[*fixup_map_array_ro].imm = map_fds[14];
+                       fixup_map_array_ro++;
+               } while (*fixup_map_array_ro);
+       }
+       if (*fixup_map_array_wo) {
+               map_fds[15] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
+                                          sizeof(struct test_val), 1,
+                                          BPF_F_WRONLY_PROG);
+               update_map(map_fds[15], 0);
+               do {
+                       prog[*fixup_map_array_wo].imm = map_fds[15];
+                       fixup_map_array_wo++;
+               } while (*fixup_map_array_wo);
+       }
+       if (*fixup_map_array_small) {
+               map_fds[16] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
+                                          1, 1, 0);
+               update_map(map_fds[16], 0);
+               do {
+                       prog[*fixup_map_array_small].imm = map_fds[16];
+                       fixup_map_array_small++;
+               } while (*fixup_map_array_small);
+       }
 }
 
 static int set_admin(bool admin)
@@ -718,12 +771,17 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
                prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
        fixup_skips = skips;
        do_test_fixup(test, prog_type, prog, map_fds);
+       if (test->fill_insns) {
+               prog = test->fill_insns;
+               prog_len = test->prog_len;
+       } else {
+               prog_len = probe_filter_length(prog);
+       }
        /* If there were some map skips during fixup due to missing bpf
         * features, skip this test.
         */
        if (fixup_skips != skips)
                return;
-       prog_len = probe_filter_length(prog);
 
        pflags = 0;
        if (test->flags & F_LOAD_WITH_STRICT_ALIGNMENT)
@@ -731,7 +789,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
        if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
                pflags |= BPF_F_ANY_ALIGNMENT;
        fd_prog = bpf_verify_program(prog_type, prog, prog_len, pflags,
-                                    "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);
+                                    "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 4);
        if (fd_prog < 0 && !bpf_probe_prog_type(prog_type, 0)) {
                printf("SKIP (unsupported program type %d)\n", prog_type);
                skips++;
@@ -830,6 +888,8 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
                goto fail_log;
        }
 close_fds:
+       if (test->fill_insns)
+               free(test->fill_insns);
        close(fd_prog);
        for (i = 0; i < MAX_NR_MAPS; i++)
                close(map_fds[i]);
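
Note: fill helpers now emit into a dedicated fill_insns buffer sized for
MAX_TEST_INSNS (1M) instructions and record prog_len, and __create_map()
threads extra flags through so the fixups can build access-restricted arrays.
A sketch of the three new maps, using the same libbpf helper as above:

	int ro_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
				   sizeof(struct test_val), 1,
				   BPF_F_RDONLY_PROG);
	int wo_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
				   sizeof(struct test_val), 1,
				   BPF_F_WRONLY_PROG);
	int small_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
				      1 /* 1-byte value */, 1, 0);
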
index 4cdb63bf0521d546c65802cd7567a46a12e6769d..9a9fc6c9b70b5660ee83bca010b98b4af704dd2f 100644 (file)
@@ -52,6 +52,10 @@ struct ksym *ksym_search(long key)
        int start = 0, end = sym_cnt;
        int result;
 
+       /* kallsyms not loaded. return NULL */
+       if (sym_cnt <= 0)
+               return NULL;
+
        while (start < end) {
                size_t mid = start + (end - start) / 2;
 
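
Note: the guard above makes ksym_search() return NULL instead of a bogus
binary-search hit when kallsyms was never loaded. Callers are then expected
to handle the NULL themselves, e.g. (sketch, caller code assumed):

	struct ksym *sym = ksym_search(ip);

	if (!sym) {
		printf("0x%lx (kallsyms not available)\n", ip);
		return;
	}
	printf("%s\n", sym->name);
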
index 0dcecaf3ec6f547b0f60a33621f36e2d9780d88a..bcb83196e459bed011b9df3b1dbb3404ac9a6088 100644 (file)
        .result = REJECT,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
+{
+       "valid read map access into a read-only array 1",
+       .insns = {
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_array_ro = { 3 },
+       .result = ACCEPT,
+       .retval = 28,
+},
+{
+       "valid read map access into a read-only array 2",
+       .insns = {
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_MOV64_IMM(BPF_REG_2, 4),
+       BPF_MOV64_IMM(BPF_REG_3, 0),
+       BPF_MOV64_IMM(BPF_REG_4, 0),
+       BPF_MOV64_IMM(BPF_REG_5, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                    BPF_FUNC_csum_diff),
+       BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       .fixup_map_array_ro = { 3 },
+       .result = ACCEPT,
+       .retval = -29,
+},
+{
+       "invalid write map access into a read-only array 1",
+       .insns = {
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+       BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_array_ro = { 3 },
+       .result = REJECT,
+       .errstr = "write into map forbidden",
+},
+{
+       "invalid write map access into a read-only array 2",
+       .insns = {
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+       BPF_MOV64_IMM(BPF_REG_2, 0),
+       BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
+       BPF_MOV64_IMM(BPF_REG_4, 8),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                    BPF_FUNC_skb_load_bytes),
+       BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       .fixup_map_array_ro = { 4 },
+       .result = REJECT,
+       .errstr = "write into map forbidden",
+},
+{
+       "valid write map access into a write-only array 1",
+       .insns = {
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+       BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_array_wo = { 3 },
+       .result = ACCEPT,
+       .retval = 1,
+},
+{
+       "valid write map access into a write-only array 2",
+       .insns = {
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+       BPF_MOV64_IMM(BPF_REG_2, 0),
+       BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
+       BPF_MOV64_IMM(BPF_REG_4, 8),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                    BPF_FUNC_skb_load_bytes),
+       BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       .fixup_map_array_wo = { 4 },
+       .result = ACCEPT,
+       .retval = 0,
+},
+{
+       "invalid read map access into a write-only array 1",
+       .insns = {
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_array_wo = { 3 },
+       .result = REJECT,
+       .errstr = "read from map forbidden",
+},
+{
+       "invalid read map access into a write-only array 2",
+       .insns = {
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_MOV64_IMM(BPF_REG_2, 4),
+       BPF_MOV64_IMM(BPF_REG_3, 0),
+       BPF_MOV64_IMM(BPF_REG_4, 0),
+       BPF_MOV64_IMM(BPF_REG_5, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                    BPF_FUNC_csum_diff),
+       BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       .fixup_map_array_wo = { 3 },
+       .result = REJECT,
+       .errstr = "read from map forbidden",
+},
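
Note: these cases pin down BPF_F_RDONLY_PROG/BPF_F_WRONLY_PROG enforcement:
direct stores into read-only values and direct loads from write-only values
are rejected, and helper arguments count too (csum_diff reads from the
pointer, skb_load_bytes writes through it). In restricted C the rejected
store looks roughly like this (map name hypothetical):

	int key = 0;
	long *value = bpf_map_lookup_elem(&rdonly_map, &key);

	if (value)
		*value = 42;	/* verifier: "write into map forbidden" */
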
index f2ccae39ee66b32c8b60890dcacff0bd89c8abd0..fb11240b758b1a60f864473d4ee4caa1ff2932a4 100644 (file)
        .errstr = "call stack",
        .result = REJECT,
 },
+{
+       "calls: stack depth check in dead code",
+       .insns = {
+       /* main */
+       BPF_MOV64_IMM(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
+       BPF_EXIT_INSN(),
+       /* A */
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2), /* call B */
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       /* B */
+       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
+       BPF_EXIT_INSN(),
+       /* C */
+       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
+       BPF_EXIT_INSN(),
+       /* D */
+       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
+       BPF_EXIT_INSN(),
+       /* E */
+       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
+       BPF_EXIT_INSN(),
+       /* F */
+       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
+       BPF_EXIT_INSN(),
+       /* G */
+       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
+       BPF_EXIT_INSN(),
+       /* H */
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .errstr = "call stack",
+       .result = REJECT,
+},
 {
        "calls: spill into caller stack frame",
        .insns = {
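
Note: in the "stack depth check in dead code" case above, R1 is zero, so the
JEQ always skips the call into B and the whole A..H chain is dead at runtime;
the verifier is still expected to account the chain's call-frame depth and
reject with "call stack". A C analogy of the shape (a static analyzer
charging stack for a branch that never executes):

	static int H(void) { return 0; }
	static int G(void) { return H(); }
	static int F(void) { return G(); }
	static int E(void) { return F(); }
	static int D(void) { return E(); }
	static int C(void) { return D(); }
	static int B(void) { return C(); }

	static int A(long r1)
	{
		if (r1 != 0)
			return B();	/* dead: every caller passes r1 == 0 */
		return 0;
	}

	int main(void) { return A(0); }
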
index c660deb582f169b117a24685d429230dc847521d..b0fda2877119c4af08277bd0f329f238c193313c 100644 (file)
        .errstr = "invalid bpf_context access",
        .result = REJECT,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
-       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
        "check cb access: half, wrong type",
diff --git a/tools/testing/selftests/bpf/verifier/direct_value_access.c b/tools/testing/selftests/bpf/verifier/direct_value_access.c
new file mode 100644 (file)
index 0000000..b9fb28e
--- /dev/null
@@ -0,0 +1,347 @@
+{
+       "direct map access, write test 1",
+       .insns = {
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_LD_MAP_VALUE(BPF_REG_1, 0, 0),
+       BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 4242),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_array_48b = { 1 },
+       .result = ACCEPT,
+       .retval = 1,
+},
+{
+       "direct map access, write test 2",
+       .insns = {
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_LD_MAP_VALUE(BPF_REG_1, 0, 8),
+       BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 4242),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_array_48b = { 1 },
+       .result = ACCEPT,
+       .retval = 1,
+},
+{
+       "direct map access, write test 3",
+       .insns = {
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_LD_MAP_VALUE(BPF_REG_1, 0, 8),
+       BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 4242),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_array_48b = { 1 },
+       .result = ACCEPT,
+       .retval = 1,
+},
+{
+       "direct map access, write test 4",
+       .insns = {
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_LD_MAP_VALUE(BPF_REG_1, 0, 40),
+       BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 4242),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_array_48b = { 1 },
+       .result = ACCEPT,
+       .retval = 1,
+},
+{
+       "direct map access, write test 5",
+       .insns = {
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_LD_MAP_VALUE(BPF_REG_1, 0, 32),
+       BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 4242),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_array_48b = { 1 },
+       .result = ACCEPT,
+       .retval = 1,
+},
+{
+       "direct map access, write test 6",
+       .insns = {
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_LD_MAP_VALUE(BPF_REG_1, 0, 40),
+       BPF_ST_MEM(BPF_DW, BPF_REG_1, 4, 4242),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_array_48b = { 1 },
+       .result = REJECT,
+       .errstr = "R1 min value is outside of the array range",
+},
+{
+       "direct map access, write test 7",
+       .insns = {
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_LD_MAP_VALUE(BPF_REG_1, 0, -1),
+       BPF_ST_MEM(BPF_DW, BPF_REG_1, 4, 4242),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_array_48b = { 1 },
+       .result = REJECT,
+       .errstr = "direct value offset of 4294967295 is not allowed",
+},
+{
+       "direct map access, write test 8",
+       .insns = {
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_LD_MAP_VALUE(BPF_REG_1, 0, 1),
+       BPF_ST_MEM(BPF_DW, BPF_REG_1, -1, 4242),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_array_48b = { 1 },
+       .result = ACCEPT,
+       .retval = 1,
+},
+{
+       "direct map access, write test 9",
+       .insns = {
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_LD_MAP_VALUE(BPF_REG_1, 0, 48),
+       BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 4242),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_array_48b = { 1 },
+       .result = REJECT,
+       .errstr = "invalid access to map value pointer",
+},
+{
+       "direct map access, write test 10",
+       .insns = {
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_LD_MAP_VALUE(BPF_REG_1, 0, 47),
+       BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 4),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_array_48b = { 1 },
+       .result = ACCEPT,
+       .retval = 1,
+},
+{
+       "direct map access, write test 11",
+       .insns = {
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_LD_MAP_VALUE(BPF_REG_1, 0, 48),
+       BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 4),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_array_48b = { 1 },
+       .result = REJECT,
+       .errstr = "invalid access to map value pointer",
+},
+{
+       "direct map access, write test 12",
+       .insns = {
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_LD_MAP_VALUE(BPF_REG_1, 0, (1<<29)),
+       BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 4),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_array_48b = { 1 },
+       .result = REJECT,
+       .errstr = "direct value offset of 536870912 is not allowed",
+},
+{
+       "direct map access, write test 13",
+       .insns = {
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_LD_MAP_VALUE(BPF_REG_1, 0, (1<<29)-1),
+       BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 4),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_array_48b = { 1 },
+       .result = REJECT,
+       .errstr = "invalid access to map value pointer, value_size=48 off=536870911",
+},
+{
+       "direct map access, write test 14",
+       .insns = {
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_LD_MAP_VALUE(BPF_REG_1, 0, 47),
+       BPF_LD_MAP_VALUE(BPF_REG_2, 0, 46),
+       BPF_ST_MEM(BPF_H, BPF_REG_2, 0, 0xffff),
+       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_array_48b = { 1, 3 },
+       .result = ACCEPT,
+       .retval = 0xff,
+},
+{
+       "direct map access, write test 15",
+       .insns = {
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_LD_MAP_VALUE(BPF_REG_1, 0, 46),
+       BPF_LD_MAP_VALUE(BPF_REG_2, 0, 46),
+       BPF_ST_MEM(BPF_H, BPF_REG_2, 0, 0xffff),
+       BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 0),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_array_48b = { 1, 3 },
+       .result = ACCEPT,
+       .retval = 0xffff,
+},
+{
+       "direct map access, write test 16",
+       .insns = {
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_LD_MAP_VALUE(BPF_REG_1, 0, 46),
+       BPF_LD_MAP_VALUE(BPF_REG_2, 0, 47),
+       BPF_ST_MEM(BPF_H, BPF_REG_2, 0, 0xffff),
+       BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 0),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_array_48b = { 1, 3 },
+       .result = REJECT,
+       .errstr = "invalid access to map value, value_size=48 off=47 size=2",
+},
+{
+       "direct map access, write test 17",
+       .insns = {
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_LD_MAP_VALUE(BPF_REG_1, 0, 46),
+       BPF_LD_MAP_VALUE(BPF_REG_2, 0, 46),
+       BPF_ST_MEM(BPF_H, BPF_REG_2, 1, 0xffff),
+       BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 0),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_array_48b = { 1, 3 },
+       .result = REJECT,
+       .errstr = "invalid access to map value, value_size=48 off=47 size=2",
+},
+{
+       "direct map access, write test 18",
+       .insns = {
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_LD_MAP_VALUE(BPF_REG_1, 0, 0),
+       BPF_ST_MEM(BPF_H, BPF_REG_1, 0, 42),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_array_small = { 1 },
+       .result = REJECT,
+       .errstr = "R1 min value is outside of the array range",
+},
+{
+       "direct map access, write test 19",
+       .insns = {
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_LD_MAP_VALUE(BPF_REG_1, 0, 0),
+       BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_array_small = { 1 },
+       .result = ACCEPT,
+       .retval = 1,
+},
+{
+       "direct map access, write test 20",
+       .insns = {
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_LD_MAP_VALUE(BPF_REG_1, 0, 1),
+       BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_array_small = { 1 },
+       .result = REJECT,
+       .errstr = "invalid access to map value pointer",
+},
+{
+       "direct map access, invalid insn test 1",
+       .insns = {
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_VALUE, 0, 1, 0, 47),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_array_48b = { 1 },
+       .result = REJECT,
+       .errstr = "invalid bpf_ld_imm64 insn",
+},
+{
+       "direct map access, invalid insn test 2",
+       .insns = {
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_VALUE, 1, 0, 0, 47),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_array_48b = { 1 },
+       .result = REJECT,
+       .errstr = "BPF_LD_IMM64 uses reserved fields",
+},
+{
+       "direct map access, invalid insn test 3",
+       .insns = {
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_VALUE, ~0, 0, 0, 47),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_array_48b = { 1 },
+       .result = REJECT,
+       .errstr = "BPF_LD_IMM64 uses reserved fields",
+},
+{
+       "direct map access, invalid insn test 4",
+       .insns = {
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_VALUE, 0, ~0, 0, 47),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_array_48b = { 1 },
+       .result = REJECT,
+       .errstr = "invalid bpf_ld_imm64 insn",
+},
+{
+       "direct map access, invalid insn test 5",
+       .insns = {
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_VALUE, ~0, ~0, 0, 47),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_array_48b = { 1 },
+       .result = REJECT,
+       .errstr = "invalid bpf_ld_imm64 insn",
+},
+{
+       "direct map access, invalid insn test 6",
+       .insns = {
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_FD, ~0, 0, 0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_array_48b = { 1 },
+       .result = REJECT,
+       .errstr = "BPF_LD_IMM64 uses reserved fields",
+},
+{
+       "direct map access, invalid insn test 7",
+       .insns = {
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_FD, 0, ~0, 0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_array_48b = { 1 },
+       .result = REJECT,
+       .errstr = "invalid bpf_ld_imm64 insn",
+},
+{
+       "direct map access, invalid insn test 8",
+       .insns = {
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_FD, ~0, ~0, 0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_array_48b = { 1 },
+       .result = REJECT,
+       .errstr = "invalid bpf_ld_imm64 insn",
+},
+{
+       "direct map access, invalid insn test 9",
+       .insns = {
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_FD, 0, 0, 0, 47),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_array_48b = { 1 },
+       .result = REJECT,
+       .errstr = "unrecognized bpf_ld_imm64 insn",
+},
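
Note: the new file exercises direct map value loads, a BPF_LD_IMM64 variant
(src_reg == BPF_PSEUDO_MAP_VALUE) that yields a pointer at a constant offset
into a map value, plus the reserved-field encoding checks. With a 48-byte
value the offset plus access size must stay in bounds, e.g. (same macros as
above, map fd fixed up by the harness):

	BPF_LD_MAP_VALUE(BPF_REG_1, 0, 40),
	BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 4242),	/* bytes 40..47: accepted */
	BPF_LD_MAP_VALUE(BPF_REG_2, 0, 47),
	BPF_ST_MEM(BPF_B, BPF_REG_2, 0, 4),	/* byte 47: accepted */
	/* any access reaching offset 48 or beyond is rejected */
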
index d2c75b889598a777384459a8acf538f09f3d5f35..0f18e62f0099bbcf2e6bb982138c42cda40b0d76 100644 (file)
        .result = ACCEPT,
        .retval = 5,
 },
+{
+       "ld_dw: xor semi-random 64 bit imms, test 5",
+       .insns = { },
+       .data = { },
+       .fill_helper = bpf_fill_rand_ld_dw,
+       .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       .result = ACCEPT,
+       .retval = 1000000 - 6,
+},
index 1e536ff121a5c0199a0316b9aec76ed1d0563313..8504ac9378098962be2d7bbec6f04e0907e5699d 100644 (file)
        .prog_type = BPF_PROG_TYPE_LWT_IN,
 },
 {
-       "indirect variable-offset stack access",
+       "indirect variable-offset stack access, unbounded",
+       .insns = {
+       BPF_MOV64_IMM(BPF_REG_2, 6),
+       BPF_MOV64_IMM(BPF_REG_3, 28),
+       /* Fill the top 16 bytes of the stack. */
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+       /* Get an unknown value. */
+       BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, offsetof(struct bpf_sock_ops,
+                                                          bytes_received)),
+       /* Check the lower bound but don't check the upper one. */
+       BPF_JMP_IMM(BPF_JSLT, BPF_REG_4, 0, 4),
+       /* Point the lower bound to initialized stack. Offset is now in range
+        * from fp-16 to fp+0x7fffffffffffffef, i.e. max value is unbounded.
+        */
+       BPF_ALU64_IMM(BPF_SUB, BPF_REG_4, 16),
+       BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_10),
+       BPF_MOV64_IMM(BPF_REG_5, 8),
+       /* Dereference it indirectly. */
+       BPF_EMIT_CALL(BPF_FUNC_getsockopt),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .errstr = "R4 unbounded indirect variable offset stack access",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_SOCK_OPS,
+},
+{
+       "indirect variable-offset stack access, max out of bound",
        .insns = {
        /* Fill the top 8 bytes of the stack */
        BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
        BPF_EXIT_INSN(),
        },
        .fixup_map_hash_8b = { 5 },
-       .errstr = "variable stack read R2",
+       .errstr = "R2 max value is outside of stack bound",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_LWT_IN,
+},
+{
+       "indirect variable-offset stack access, min out of bound",
+       .insns = {
+       /* Fill the top 8 bytes of the stack */
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+       /* Get an unknown value */
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
+       /* Make it small and 4-byte aligned */
+       BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
+       BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 516),
+       /* add it to fp.  We now have either fp-516 or fp-512, but
+        * we don't know which
+        */
+       BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
+       /* dereference it indirectly */
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_hash_8b = { 5 },
+       .errstr = "R2 min value is outside of stack bound",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_LWT_IN,
+},
+{
+       "indirect variable-offset stack access, max_off+size > max_initialized",
+       .insns = {
+       /* Fill only the second from top 8 bytes of the stack. */
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
+       /* Get an unknown value. */
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
+       /* Make it small and 4-byte aligned. */
+       BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
+       BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 16),
+       /* Add it to fp.  We now have either fp-12 or fp-16, but we don't know
+        * which; an 8-byte access at fp-12 overlaps partially uninitialized
+        * stack.
+        */
+       BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
+       /* Dereference it indirectly. */
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_hash_8b = { 5 },
+       .errstr = "invalid indirect read from stack var_off",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_LWT_IN,
+},
+{
+       "indirect variable-offset stack access, min_off < min_initialized",
+       .insns = {
+       /* Fill only the top 8 bytes of the stack. */
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+       /* Get an unknown value */
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
+       /* Make it small and 4-byte aligned. */
+       BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
+       BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 16),
+       /* Add it to fp.  We now have either fp-12 or fp-16, but we don't know
+        * which; an 8-byte access at fp-16 overlaps partially uninitialized
+        * stack.
+        */
+       BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
+       /* Dereference it indirectly. */
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_hash_8b = { 5 },
+       .errstr = "invalid indirect read from stack var_off",
        .result = REJECT,
        .prog_type = BPF_PROG_TYPE_LWT_IN,
 },
+{
+       "indirect variable-offset stack access, priv vs unpriv",
+       .insns = {
+       /* Fill the top 16 bytes of the stack. */
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+       /* Get an unknown value. */
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
+       /* Make it small and 4-byte aligned. */
+       BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
+       BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 16),
+       /* Add it to fp.  We now have either fp-12 or fp-16, we don't know
+        * which, but either way it points to initialized stack.
+        */
+       BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
+       /* Dereference it indirectly. */
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_hash_8b = { 6 },
+       .errstr_unpriv = "R2 stack pointer arithmetic goes out of range, prohibited for !root",
+       .result_unpriv = REJECT,
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+       "indirect variable-offset stack access, uninitialized",
+       .insns = {
+       BPF_MOV64_IMM(BPF_REG_2, 6),
+       BPF_MOV64_IMM(BPF_REG_3, 28),
+       /* Partially fill the top 16 bytes of the stack; the 4-byte store
+        * leaves bytes fp-12..fp-9 uninitialized.
+        */
+       BPF_ST_MEM(BPF_W, BPF_REG_10, -16, 0),
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+       /* Get an unknown value. */
+       BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, 0),
+       /* Make it small and 4-byte aligned. */
+       BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 4),
+       BPF_ALU64_IMM(BPF_SUB, BPF_REG_4, 16),
+       /* Add it to fp.  We now have either fp-12 or fp-16, we don't know
+        * which; either way an 8-byte access overlaps the uninitialized gap
+        * left above.
+        */
+       BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_10),
+       BPF_MOV64_IMM(BPF_REG_5, 8),
+       /* Dereference it indirectly. */
+       BPF_EMIT_CALL(BPF_FUNC_getsockopt),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .errstr = "invalid indirect read from stack var_off",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_SOCK_OPS,
+},
+{
+       "indirect variable-offset stack access, ok",
+       .insns = {
+       /* Fill the top 16 bytes of the stack. */
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+       /* Get an unknown value. */
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
+       /* Make it small and 4-byte aligned. */
+       BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
+       BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 16),
+       /* Add it to fp.  We now have either fp-12 or fp-16, we don't know
+        * which, but either way it points to initialized stack.
+        */
+       BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
+       /* Dereference it indirectly. */
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_hash_8b = { 6 },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_LWT_IN,
+},
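
Note: the common trick in these tests is to derive a stack pointer whose
offset is only known as a range; the verifier must prove the access safe for
the whole range. The arithmetic used throughout:

	BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),		/* r2 in {0, 4}     */
	BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 16),		/* r2 in {-16, -12} */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),	/* fp-16 or fp-12   */
	/* An 8-byte helper access is accepted only if both fp-16..fp-9 and
	 * fp-12..fp-5 lie inside the stack and are fully initialized;
	 * unprivileged programs may not do this pointer arithmetic at all.
	 */
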
index c4cf6e6d800ebe3d2d595805397ce1b3f70e7de3..1c30f302a1e75e42999ac97159673122cd026839 100755 (executable)
@@ -11,6 +11,7 @@ lib_dir=$(dirname $0)/../../../net/forwarding
 
 ALL_TESTS="
        rif_set_addr_test
+       rif_vrf_set_addr_test
        rif_inherit_bridge_addr_test
        rif_non_inherit_bridge_addr_test
        vlan_interface_deletion_test
@@ -26,6 +27,7 @@ ALL_TESTS="
        lag_dev_deletion_test
        vlan_interface_uppers_test
        bridge_extern_learn_test
+       neigh_offload_test
        devlink_reload_test
 "
 NUM_NETIFS=2
@@ -98,6 +100,25 @@ rif_set_addr_test()
        ip link set dev $swp1 addr $swp1_mac
 }
 
+rif_vrf_set_addr_test()
+{
+       # Test that it is possible to set an IP address on a VRF upper despite
+       # its random MAC address.
+       RET=0
+
+       ip link add name vrf-test type vrf table 10
+       ip link set dev $swp1 master vrf-test
+
+       ip -4 address add 192.0.2.1/24 dev vrf-test
+       check_err $? "failed to set IPv4 address on VRF"
+       ip -6 address add 2001:db8:1::1/64 dev vrf-test
+       check_err $? "failed to set IPv6 address on VRF"
+
+       log_test "RIF - setting IP address on VRF"
+
+       ip link del dev vrf-test
+}
+
 rif_inherit_bridge_addr_test()
 {
        RET=0
@@ -561,6 +582,31 @@ bridge_extern_learn_test()
        ip link del dev br0
 }
 
+neigh_offload_test()
+{
+       # Test that IPv4 and IPv6 neighbour entries are marked as offloaded
+       RET=0
+
+       ip -4 address add 192.0.2.1/24 dev $swp1
+       ip -6 address add 2001:db8:1::1/64 dev $swp1
+
+       ip -4 neigh add 192.0.2.2 lladdr de:ad:be:ef:13:37 nud perm dev $swp1
+       ip -6 neigh add 2001:db8:1::2 lladdr de:ad:be:ef:13:37 nud perm \
+               dev $swp1
+
+       ip -4 neigh show dev $swp1 | grep 192.0.2.2 | grep -q offload
+       check_err $? "ipv4 neigh entry not marked as offloaded when should"
+       ip -6 neigh show dev $swp1 | grep 2001:db8:1::2 | grep -q offload
+       check_err $? "ipv6 neigh entry not marked as offloaded when should"
+
+       log_test "neighbour offload indication"
+
+       ip -6 neigh del 2001:db8:1::2 dev $swp1
+       ip -4 neigh del 192.0.2.2 dev $swp1
+       ip -6 address del 2001:db8:1::1/64 dev $swp1
+       ip -4 address del 192.0.2.1/24 dev $swp1
+}
+
 devlink_reload_test()
 {
        # Test that after executing all the above configuration tests, a
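
Note: the two new mlxsw cases check that a VRF upper (which gets a random
MAC address) can still be assigned IPv4/IPv6 addresses, and that permanent
neighbour entries on a router interface carry the "offload" flag once they
are programmed to hardware. The flag can be inspected by hand with, e.g.,
"ip -4 neigh show dev <port> | grep offload".
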
index 3c1f4bdf90000c7f163fd72a2974bfabf982c47f..f8588cca2bef4bfe4d3cdf2afdb6586f21e67894 100644 (file)
@@ -1,3 +1,5 @@
+include ../../../../scripts/Kbuild.include
+
 all:
 
 top_srcdir = ../../../..
@@ -17,6 +19,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/state_test
 TEST_GEN_PROGS_x86_64 += x86_64/evmcs_test
 TEST_GEN_PROGS_x86_64 += x86_64/hyperv_cpuid
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_close_while_nested_test
+TEST_GEN_PROGS_x86_64 += x86_64/smm_test
 TEST_GEN_PROGS_x86_64 += dirty_log_test
 TEST_GEN_PROGS_x86_64 += clear_dirty_log_test
 
@@ -29,8 +32,12 @@ LIBKVM += $(LIBKVM_$(UNAME_M))
 INSTALL_HDR_PATH = $(top_srcdir)/usr
 LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/
 LINUX_TOOL_INCLUDE = $(top_srcdir)/tools/include
-CFLAGS += -O2 -g -std=gnu99 -I$(LINUX_TOOL_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -Iinclude/$(UNAME_M) -I..
-LDFLAGS += -pthread
+CFLAGS += -O2 -g -std=gnu99 -fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -Iinclude/$(UNAME_M) -I..
+
+no-pie-option := $(call try-run, echo 'int main() { return 0; }' | \
+        $(CC) -Werror $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -no-pie -x c - -o "$$TMP", -no-pie)
+
+LDFLAGS += -pthread $(no-pie-option)
 
 # After inclusion, $(OUTPUT) is defined and
 # $(TEST_GEN_PROGS) starts with $(OUTPUT)/
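
Note: the Makefile now pulls in Kbuild.include for its try-run helper, builds
with -fno-stack-protector/-fno-PIE, and links with -no-pie only when the
toolchain accepts it. The probe is roughly equivalent to running
"echo 'int main() { return 0; }' | cc -Werror -no-pie -x c - -o <tmp>";
if that compile succeeds, -no-pie lands in LDFLAGS, otherwise it is omitted.
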
index a84785b0255776eb67b5df72df87575710f6cfaa..07b71ad9734af57f101f0f96fdc57b741b41b0e7 100644 (file)
@@ -102,6 +102,7 @@ vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva);
 struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid);
 void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid);
 int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid);
+void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid);
 void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid,
                       struct kvm_mp_state *mp_state);
 void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs);
index e2884c2b81fff80c1ec6c261828dbb0493b3e98b..6063d5b2f3561c450778f86f3d1474390d79b5ec 100644 (file)
@@ -778,6 +778,33 @@ void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
 #define MSR_IA32_APICBASE_ENABLE       (1<<11)
 #define MSR_IA32_APICBASE_BASE         (0xfffff<<12)
 
+#define APIC_BASE_MSR  0x800
+#define X2APIC_ENABLE  (1UL << 10)
+#define        APIC_ICR        0x300
+#define                APIC_DEST_SELF          0x40000
+#define                APIC_DEST_ALLINC        0x80000
+#define                APIC_DEST_ALLBUT        0xC0000
+#define                APIC_ICR_RR_MASK        0x30000
+#define                APIC_ICR_RR_INVALID     0x00000
+#define                APIC_ICR_RR_INPROG      0x10000
+#define                APIC_ICR_RR_VALID       0x20000
+#define                APIC_INT_LEVELTRIG      0x08000
+#define                APIC_INT_ASSERT         0x04000
+#define                APIC_ICR_BUSY           0x01000
+#define                APIC_DEST_LOGICAL       0x00800
+#define                APIC_DEST_PHYSICAL      0x00000
+#define                APIC_DM_FIXED           0x00000
+#define                APIC_DM_FIXED_MASK      0x00700
+#define                APIC_DM_LOWEST          0x00100
+#define                APIC_DM_SMI             0x00200
+#define                APIC_DM_REMRD           0x00300
+#define                APIC_DM_NMI             0x00400
+#define                APIC_DM_INIT            0x00500
+#define                APIC_DM_STARTUP         0x00600
+#define                APIC_DM_EXTINT          0x00700
+#define                APIC_VECTOR_MASK        0x000FF
+#define        APIC_ICR2       0x310
+
 #define MSR_IA32_TSCDEADLINE           0x000006e0
 
 #define MSR_IA32_UCODE_WRITE           0x00000079
index b52cfdefecbfe9f760b569baf757d2228fe0c6af..4ca96b228e46ba248476803583cb94d14410ff16 100644 (file)
@@ -91,6 +91,11 @@ static void vm_open(struct kvm_vm *vm, int perm, unsigned long type)
        if (vm->kvm_fd < 0)
                exit(KSFT_SKIP);
 
+       if (!kvm_check_cap(KVM_CAP_IMMEDIATE_EXIT)) {
+               fprintf(stderr, "immediate_exit not available, skipping test\n");
+               exit(KSFT_SKIP);
+       }
+
        vm->fd = ioctl(vm->kvm_fd, KVM_CREATE_VM, type);
        TEST_ASSERT(vm->fd >= 0, "KVM_CREATE_VM ioctl failed, "
                "rc: %i errno: %i", vm->fd, errno);
@@ -1121,6 +1126,22 @@ int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid)
        return rc;
 }
 
+void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid)
+{
+       struct vcpu *vcpu = vcpu_find(vm, vcpuid);
+       int ret;
+
+       TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
+
+       vcpu->state->immediate_exit = 1;
+       ret = ioctl(vcpu->fd, KVM_RUN, NULL);
+       vcpu->state->immediate_exit = 0;
+
+       TEST_ASSERT(ret == -1 && errno == EINTR,
+                   "KVM_RUN IOCTL didn't exit immediately, rc: %i, errno: %i",
+                   ret, errno);
+}
+
 /*
  * VM VCPU Set MP State
  *
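
Note: vcpu_run_complete_io() relies on KVM_CAP_IMMEDIATE_EXIT (hence the new
vm_open() check): with run->immediate_exit set, KVM_RUN finishes any pending
I/O from the previous exit and returns -1/EINTR before entering the guest.
A hedged caller-side sketch (run/regs declarations assumed):

	/* Flush a pending PIO/MMIO exit so saved vCPU state is consistent. */
	if (run->exit_reason == KVM_EXIT_IO)
		vcpu_run_complete_io(vm, VCPU_ID);
	vcpu_regs_get(vm, VCPU_ID, &regs);
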
index f28127f4a3af63cb9ac15d2124f425e7492fccda..dc7fae9fa424cf2b45fb7acf10c4b58c272763a0 100644 (file)
@@ -1030,6 +1030,14 @@ struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid)
                            nested_size, sizeof(state->nested_));
        }
 
+       /*
+        * When KVM exits to userspace with KVM_EXIT_IO, KVM guarantees
+        * guest state is consistent only after userspace re-enters the
+        * kernel with KVM_RUN.  Complete IO prior to migrating state
+        * to a new VM.
+        */
+       vcpu_run_complete_io(vm, vcpuid);
+
        nmsrs = kvm_get_num_msrs(vm);
        list = malloc(sizeof(*list) + nmsrs * sizeof(list->indices[0]));
        list->nmsrs = nmsrs;
@@ -1093,12 +1101,6 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *s
        struct vcpu *vcpu = vcpu_find(vm, vcpuid);
        int r;
 
-       if (state->nested.size) {
-               r = ioctl(vcpu->fd, KVM_SET_NESTED_STATE, &state->nested);
-               TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_NESTED_STATE, r: %i",
-                       r);
-       }
-
        r = ioctl(vcpu->fd, KVM_SET_XSAVE, &state->xsave);
         TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XSAVE, r: %i",
                 r);
@@ -1130,4 +1132,10 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *s
        r = ioctl(vcpu->fd, KVM_SET_REGS, &state->regs);
         TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_REGS, r: %i",
                 r);
+
+       if (state->nested.size) {
+               r = ioctl(vcpu->fd, KVM_SET_NESTED_STATE, &state->nested);
+               TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_NESTED_STATE, r: %i",
+                       r);
+       }
 }
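
Note: the reorder above makes vcpu_load_state() issue KVM_SET_NESTED_STATE
last, after XSAVE, sregs, MSRs and general-purpose registers are in place,
presumably because restoring nested state depends on the rest of the vCPU
state already being loaded. The resulting tail of the restore (mirroring the
hunk):

	r = ioctl(vcpu->fd, KVM_SET_REGS, &state->regs);
	TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_REGS, r: %i", r);

	if (state->nested.size) {
		r = ioctl(vcpu->fd, KVM_SET_NESTED_STATE, &state->nested);
		TEST_ASSERT(r == 0,
			    "Unexpected result from KVM_SET_NESTED_STATE, r: %i",
			    r);
	}
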
index d503a51fad307526397f24a333080065e1ab4e1b..7c2c4d4055a80bf49bc56870a1ba828df8ee577d 100644 (file)
@@ -87,22 +87,25 @@ int main(int argc, char *argv[])
        while (1) {
                rc = _vcpu_run(vm, VCPU_ID);
 
-               if (run->exit_reason == KVM_EXIT_IO) {
-                       switch (get_ucall(vm, VCPU_ID, &uc)) {
-                       case UCALL_SYNC:
-                               /* emulate hypervisor clearing CR4.OSXSAVE */
-                               vcpu_sregs_get(vm, VCPU_ID, &sregs);
-                               sregs.cr4 &= ~X86_CR4_OSXSAVE;
-                               vcpu_sregs_set(vm, VCPU_ID, &sregs);
-                               break;
-                       case UCALL_ABORT:
-                               TEST_ASSERT(false, "Guest CR4 bit (OSXSAVE) unsynchronized with CPUID bit.");
-                               break;
-                       case UCALL_DONE:
-                               goto done;
-                       default:
-                               TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd);
-                       }
+               TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+                           "Unexpected exit reason: %u (%s),\n",
+                           run->exit_reason,
+                           exit_reason_str(run->exit_reason));
+
+               switch (get_ucall(vm, VCPU_ID, &uc)) {
+               case UCALL_SYNC:
+                       /* emulate hypervisor clearing CR4.OSXSAVE */
+                       vcpu_sregs_get(vm, VCPU_ID, &sregs);
+                       sregs.cr4 &= ~X86_CR4_OSXSAVE;
+                       vcpu_sregs_set(vm, VCPU_ID, &sregs);
+                       break;
+               case UCALL_ABORT:
+                       TEST_ASSERT(false, "Guest CR4 bit (OSXSAVE) unsynchronized with CPUID bit.");
+                       break;
+               case UCALL_DONE:
+                       goto done;
+               default:
+                       TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd);
                }
        }
 
index c49c2a28b0eb290ccd6c51498a0b9fd716b58b07..36669684eca58a6c09140453f70a403cf0119348 100644 (file)
@@ -123,8 +123,6 @@ int main(int argc, char *argv[])
                            stage, run->exit_reason,
                            exit_reason_str(run->exit_reason));
 
-               memset(&regs1, 0, sizeof(regs1));
-               vcpu_regs_get(vm, VCPU_ID, &regs1);
                switch (get_ucall(vm, VCPU_ID, &uc)) {
                case UCALL_ABORT:
                        TEST_ASSERT(false, "%s at %s:%d", (const char *)uc.args[0],
@@ -144,6 +142,9 @@ int main(int argc, char *argv[])
                            stage, (ulong)uc.args[1]);
 
                state = vcpu_save_state(vm, VCPU_ID);
+               memset(&regs1, 0, sizeof(regs1));
+               vcpu_regs_get(vm, VCPU_ID, &regs1);
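+               /*
+                * Snapshot registers only after vcpu_save_state(): saving
+                * state first completes any in-flight I/O, which can advance
+                * the guest's registers.
+                */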
+
                kvm_vm_release(vm);
 
                /* Restore state in a new VM.  */
diff --git a/tools/testing/selftests/kvm/x86_64/smm_test.c b/tools/testing/selftests/kvm/x86_64/smm_test.c
new file mode 100644 (file)
index 0000000..fb80869
--- /dev/null
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018, Red Hat, Inc.
+ *
+ * Tests for SMM.
+ */
+#define _GNU_SOURCE /* for program_invocation_short_name */
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <sys/ioctl.h>
+
+#include "test_util.h"
+
+#include "kvm_util.h"
+
+#include "vmx.h"
+
+#define VCPU_ID              1
+
+#define PAGE_SIZE  4096
+
+#define SMRAM_SIZE 65536
+#define SMRAM_MEMSLOT ((1 << 16) | 1)
+#define SMRAM_PAGES (SMRAM_SIZE / PAGE_SIZE)
+#define SMRAM_GPA 0x1000000
+#define SMRAM_STAGE 0xfe
+
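+/* Two-step stringification, so XSTR(SYNC_PORT) expands to "0xe". */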
+#define STR(x) #x
+#define XSTR(s) STR(s)
+
+#define SYNC_PORT 0xe
+#define DONE 0xff
+
+/*
+ * This is compiled as normal 64-bit code, but the SMI handler is executed
+ * in real-address mode. To keep things simple we limit ourselves to a
+ * mode-independent subset of asm here.
+ * The SMI handler always reports back the fixed stage SMRAM_STAGE.
+ */
+uint8_t smi_handler[] = {
+       0xb0, SMRAM_STAGE,    /* mov $SMRAM_STAGE, %al */
+       0xe4, SYNC_PORT,      /* in $SYNC_PORT, %al */
+       0x0f, 0xaa,           /* rsm */
+};
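+/*
+ * A sketch of the protocol used below: the "in" triggers a KVM_EXIT_IO while
+ * %al still holds SMRAM_STAGE, so the host reads the reported stage from
+ * rax; "rsm" then resumes the interrupted context.
+ */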
+
+void sync_with_host(uint64_t phase)
+{
+       asm volatile("in $" XSTR(SYNC_PORT)", %%al \n"
+                    : : "a" (phase));
+}
+
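+/*
+ * Send a self-directed SMI through the x2APIC ICR (MSR 0x830); x2APIC mode
+ * is enabled in guest_code() before this is called.
+ */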
+void self_smi(void)
+{
+       wrmsr(APIC_BASE_MSR + (APIC_ICR >> 4),
+             APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_SMI);
+}
+
+void guest_code(struct vmx_pages *vmx_pages)
+{
+       uint64_t apicbase = rdmsr(MSR_IA32_APICBASE);
+
+       sync_with_host(1);
+
+       wrmsr(MSR_IA32_APICBASE, apicbase | X2APIC_ENABLE);
+
+       sync_with_host(2);
+
+       self_smi();
+
+       sync_with_host(4);
+
+       if (vmx_pages) {
+               GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
+
+               sync_with_host(5);
+
+               self_smi();
+
+               sync_with_host(7);
+       }
+
+       sync_with_host(DONE);
+}
+
+int main(int argc, char *argv[])
+{
+       struct vmx_pages *vmx_pages = NULL;
+       vm_vaddr_t vmx_pages_gva = 0;
+
+       struct kvm_regs regs;
+       struct kvm_vm *vm;
+       struct kvm_run *run;
+       struct kvm_x86_state *state;
+       int stage, stage_reported;
+
+       /* Create VM */
+       vm = vm_create_default(VCPU_ID, 0, guest_code);
+
+       vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+
+       run = vcpu_state(vm, VCPU_ID);
+
+       vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, SMRAM_GPA,
+                                   SMRAM_MEMSLOT, SMRAM_PAGES, 0);
+       TEST_ASSERT(vm_phy_pages_alloc(vm, SMRAM_PAGES, SMRAM_GPA, SMRAM_MEMSLOT)
+                   == SMRAM_GPA, "could not allocate guest physical addresses");
+
+       memset(addr_gpa2hva(vm, SMRAM_GPA), 0x0, SMRAM_SIZE);
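+       /* The default SMI entry point is SMBASE + 0x8000, hence the offset. */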
+       memcpy(addr_gpa2hva(vm, SMRAM_GPA) + 0x8000, smi_handler,
+              sizeof(smi_handler));
+
+       vcpu_set_msr(vm, VCPU_ID, MSR_IA32_SMBASE, SMRAM_GPA);
+
+       if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
+               vmx_pages = vcpu_alloc_vmx(vm, &vmx_pages_gva);
+               vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
+       } else {
+               printf("will skip SMM test with VMX enabled\n");
+               vcpu_args_set(vm, VCPU_ID, 1, 0);
+       }
+
+       for (stage = 1;; stage++) {
+               _vcpu_run(vm, VCPU_ID);
+               TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+                           "Stage %d: unexpected exit reason: %u (%s),\n",
+                           stage, run->exit_reason,
+                           exit_reason_str(run->exit_reason));
+
+               memset(&regs, 0, sizeof(regs));
+               vcpu_regs_get(vm, VCPU_ID, &regs);
+
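+               /* The guest reports its stage in %al on each SYNC_PORT exit. */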
+               stage_reported = regs.rax & 0xff;
+
+               if (stage_reported == DONE)
+                       goto done;
+
+               TEST_ASSERT(stage_reported == stage ||
+                           stage_reported == SMRAM_STAGE,
+                           "Unexpected stage: #%x, got %x",
+                           stage, stage_reported);
+
+               state = vcpu_save_state(vm, VCPU_ID);
+               kvm_vm_release(vm);
+               kvm_vm_restart(vm, O_RDWR);
+               vm_vcpu_add(vm, VCPU_ID, 0, 0);
+               vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+               vcpu_load_state(vm, VCPU_ID, state);
+               run = vcpu_state(vm, VCPU_ID);
+               free(state);
+       }
+
+done:
+       kvm_vm_free(vm);
+}
index 4b3f556265f1b95790e318da95adfb4073526514..e0a3c0204b7cd11c5da7024bea68f0da71e41bab 100644 (file)
@@ -156,8 +156,6 @@ int main(int argc, char *argv[])
                            stage, run->exit_reason,
                            exit_reason_str(run->exit_reason));
 
-               memset(&regs1, 0, sizeof(regs1));
-               vcpu_regs_get(vm, VCPU_ID, &regs1);
                switch (get_ucall(vm, VCPU_ID, &uc)) {
                case UCALL_ABORT:
                        TEST_ASSERT(false, "%s at %s:%d", (const char *)uc.args[0],
@@ -177,6 +175,9 @@ int main(int argc, char *argv[])
                            stage, (ulong)uc.args[1]);
 
                state = vcpu_save_state(vm, VCPU_ID);
+               memset(&regs1, 0, sizeof(regs1));
+               vcpu_regs_get(vm, VCPU_ID, &regs1);
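+               /*
+                * As in state_test: read the registers only after
+                * vcpu_save_state() has completed any pending I/O.
+                */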
+
                kvm_vm_release(vm);
 
                /* Restore state in a new VM.  */
index 1080ff55a788f720f240271741fbc38680061b7a..9457aaeae092b7b4145fa7bdc9b6b9fa6acb3e4e 100755 (executable)
@@ -9,7 +9,8 @@ ret=0
 ksft_skip=4
 
 # all tests in this script. Can be overridden with -t option
-TESTS="unregister down carrier nexthop ipv6_rt ipv4_rt ipv6_addr_metric ipv4_addr_metric ipv6_route_metrics ipv4_route_metrics"
+TESTS="unregister down carrier nexthop ipv6_rt ipv4_rt ipv6_addr_metric ipv4_addr_metric ipv6_route_metrics ipv4_route_metrics ipv4_route_v6_gw"
+
 VERBOSE=0
 PAUSE_ON_FAIL=no
 PAUSE=no
@@ -48,6 +49,7 @@ setup()
 {
        set -e
        ip netns add ns1
+       ip netns set ns1 auto
        $IP link set dev lo up
        ip netns exec ns1 sysctl -qw net.ipv4.ip_forward=1
        ip netns exec ns1 sysctl -qw net.ipv6.conf.all.forwarding=1
@@ -605,6 +607,39 @@ run_cmd()
        return $rc
 }
 
+check_expected()
+{
+       local out="$1"
+       local expected="$2"
+       local rc=0
+
+       [ "${out}" = "${expected}" ] && return 0
+
+       if [ -z "${out}" ]; then
+               if [ "$VERBOSE" = "1" ]; then
+                       printf "\nNo route entry found\n"
+                       printf "Expected:\n"
+                       printf "    ${expected}\n"
+               fi
+               return 1
+       fi
+
+       # tricky way to convert output to 1-line without ip's
+       # messy '\'; this drops all extra white space
+       out=$(echo ${out})
+       if [ "${out}" != "${expected}" ]; then
+               rc=1
+               if [ "${VERBOSE}" = "1" ]; then
+                       printf "    Unexpected route entry. Have:\n"
+                       printf "        ${out}\n"
+                       printf "    Expected:\n"
+                       printf "        ${expected}\n\n"
+               fi
+       fi
+
+       return $rc
+}
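+# Usage sketch (addresses hypothetical):
+#   out=$($IP ro ls match 172.16.104.0/24)
+#   check_expected "${out}" "172.16.104.0/24 via 172.16.101.2 dev veth1"
+# returns 0 only when the flattened route dump matches the expected string.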
+
 # add route for a prefix, flushing any existing routes first
 # expected to be the first step of a test
 add_route6()
@@ -652,31 +687,7 @@ check_route6()
        pfx=$1
 
        out=$($IP -6 ro ls match ${pfx} | sed -e 's/ pref medium//')
-       [ "${out}" = "${expected}" ] && return 0
-
-       if [ -z "${out}" ]; then
-               if [ "$VERBOSE" = "1" ]; then
-                       printf "\nNo route entry found\n"
-                       printf "Expected:\n"
-                       printf "    ${expected}\n"
-               fi
-               return 1
-       fi
-
-       # tricky way to convert output to 1-line without ip's
-       # messy '\'; this drops all extra white space
-       out=$(echo ${out})
-       if [ "${out}" != "${expected}" ]; then
-               rc=1
-               if [ "${VERBOSE}" = "1" ]; then
-                       printf "    Unexpected route entry. Have:\n"
-                       printf "        ${out}\n"
-                       printf "    Expected:\n"
-                       printf "        ${expected}\n\n"
-               fi
-       fi
-
-       return $rc
+       check_expected "${out}" "${expected}"
 }
 
 route_cleanup()
@@ -698,6 +709,7 @@ route_setup()
        set -e
 
        ip netns add ns2
+       ip netns set ns2 auto
        ip -netns ns2 link set dev lo up
        ip netns exec ns2 sysctl -qw net.ipv4.ip_forward=1
        ip netns exec ns2 sysctl -qw net.ipv6.conf.all.forwarding=1
@@ -725,7 +737,7 @@ route_setup()
        ip -netns ns2 addr add 172.16.103.2/24 dev veth4
        ip -netns ns2 addr add 172.16.104.1/24 dev dummy1
 
-       set +ex
+       set +e
 }
 
 # assumption is that basic add of a single path route works
@@ -960,7 +972,8 @@ ipv6_addr_metric_test()
        run_cmd "$IP li set dev dummy2 down"
        rc=$?
        if [ $rc -eq 0 ]; then
-               check_route6 ""
+               out=$($IP -6 ro ls match 2001:db8:104::/64)
+               check_expected "${out}" ""
                rc=$?
        fi
        log_test $rc 0 "Prefix route removed on link down"
@@ -1091,38 +1104,13 @@ check_route()
        local pfx
        local expected="$1"
        local out
-       local rc=0
 
        set -- $expected
        pfx=$1
        [ "${pfx}" = "unreachable" ] && pfx=$2
 
        out=$($IP ro ls match ${pfx})
-       [ "${out}" = "${expected}" ] && return 0
-
-       if [ -z "${out}" ]; then
-               if [ "$VERBOSE" = "1" ]; then
-                       printf "\nNo route entry found\n"
-                       printf "Expected:\n"
-                       printf "    ${expected}\n"
-               fi
-               return 1
-       fi
-
-       # tricky way to convert output to 1-line without ip's
-       # messy '\'; this drops all extra white space
-       out=$(echo ${out})
-       if [ "${out}" != "${expected}" ]; then
-               rc=1
-               if [ "${VERBOSE}" = "1" ]; then
-                       printf "    Unexpected route entry. Have:\n"
-                       printf "        ${out}\n"
-                       printf "    Expected:\n"
-                       printf "        ${expected}\n\n"
-               fi
-       fi
-
-       return $rc
+       check_expected "${out}" "${expected}"
 }
 
 # assumption is that basic add of a single path route works
@@ -1387,7 +1375,8 @@ ipv4_addr_metric_test()
        run_cmd "$IP li set dev dummy2 down"
        rc=$?
        if [ $rc -eq 0 ]; then
-               check_route ""
+               out=$($IP ro ls match 172.16.104.0/24)
+               check_expected "${out}" ""
                rc=$?
        fi
        log_test $rc 0 "Prefix route removed on link down"
@@ -1442,6 +1431,70 @@ ipv4_route_metrics_test()
        route_cleanup
 }
 
+ipv4_route_v6_gw_test()
+{
+       local rc
+
+       echo
+       echo "IPv4 route with IPv6 gateway tests"
+
+       route_setup
+       sleep 2
+
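+       # These exercise RFC 5549-style routes: IPv4 prefixes reached through
+       # an IPv6 nexthop ("via inet6 ...").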
+       #
+       # single path route
+       #
+       run_cmd "$IP ro add 172.16.104.0/24 via inet6 2001:db8:101::2"
+       rc=$?
+       log_test $rc 0 "Single path route with IPv6 gateway"
+       if [ $rc -eq 0 ]; then
+               check_route "172.16.104.0/24 via inet6 2001:db8:101::2 dev veth1"
+       fi
+
+       run_cmd "ip netns exec ns1 ping -w1 -c1 172.16.104.1"
+       log_test $rc 0 "Single path route with IPv6 gateway - ping"
+
+       run_cmd "$IP ro del 172.16.104.0/24 via inet6 2001:db8:101::2"
+       rc=$?
+       log_test $rc 0 "Single path route delete"
+       if [ $rc -eq 0 ]; then
+               out=$($IP ro ls match 172.16.104.0/24)
+               check_expected "${out}" ""
+       fi
+
+       #
+       # multipath - v6 then v4
+       #
+       run_cmd "$IP ro add 172.16.104.0/24 nexthop via inet6 2001:db8:101::2 dev veth1 nexthop via 172.16.103.2 dev veth3"
+       rc=$?
+       log_test $rc 0 "Multipath route add - v6 nexthop then v4"
+       if [ $rc -eq 0 ]; then
+               check_route "172.16.104.0/24 nexthop via inet6 2001:db8:101::2 dev veth1 weight 1 nexthop via 172.16.103.2 dev veth3 weight 1"
+       fi
+
+       run_cmd "$IP ro del 172.16.104.0/24 nexthop via 172.16.103.2 dev veth3 nexthop via inet6 2001:db8:101::2 dev veth1"
+       log_test $? 2 "    Multipath route delete - nexthops in wrong order"
+
+       run_cmd "$IP ro del 172.16.104.0/24 nexthop via inet6 2001:db8:101::2 dev veth1 nexthop via 172.16.103.2 dev veth3"
+       log_test $? 0 "    Multipath route delete exact match"
+
+       #
+       # multipath - v4 then v6
+       #
+       run_cmd "$IP ro add 172.16.104.0/24 nexthop via 172.16.103.2 dev veth3 nexthop via inet6 2001:db8:101::2 dev veth1"
+       rc=$?
+       log_test $rc 0 "Multipath route add - v4 nexthop then v6"
+       if [ $rc -eq 0 ]; then
+               check_route "172.16.104.0/24 nexthop via 172.16.103.2 dev veth3 weight 1 nexthop via inet6 2001:db8:101::2 dev veth1 weight 1"
+       fi
+
+       run_cmd "$IP ro del 172.16.104.0/24 nexthop via inet6 2001:db8:101::2 dev veth1 nexthop via 172.16.103.2 dev veth3"
+       log_test $? 2 "    Multipath route delete - nexthops in wrong order"
+
+       run_cmd "$IP ro del 172.16.104.0/24 nexthop via 172.16.103.2 dev veth3 nexthop via inet6 2001:db8:101::2 dev veth1"
+       log_test $? 0 "    Multipath route delete exact match"
+
+       route_cleanup
+}
 
 ################################################################################
 # usage
@@ -1511,6 +1564,7 @@ do
        ipv4_addr_metric)               ipv4_addr_metric_test;;
        ipv6_route_metrics)             ipv6_route_metrics_test;;
        ipv4_route_metrics)             ipv4_route_metrics_test;;
+       ipv4_route_v6_gw)               ipv4_route_v6_gw_test;;
 
        help) echo "Test names: $TESTS"; exit 0;;
        esac
diff --git a/tools/testing/selftests/net/forwarding/bridge_igmp.sh b/tools/testing/selftests/net/forwarding/bridge_igmp.sh
new file mode 100755 (executable)
index 0000000..88d2472
--- /dev/null
@@ -0,0 +1,152 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+ALL_TESTS="reportleave_test"
+NUM_NETIFS=4
+CHECK_TC="yes"
+TEST_GROUP="239.10.10.10"
+TEST_GROUP_MAC="01:00:5e:0a:0a:0a"
+source lib.sh
+
+h1_create()
+{
+       simple_if_init $h1 192.0.2.1/24 2001:db8:1::1/64
+}
+
+h1_destroy()
+{
+       simple_if_fini $h1 192.0.2.1/24 2001:db8:1::1/64
+}
+
+h2_create()
+{
+       simple_if_init $h2 192.0.2.2/24 2001:db8:1::2/64
+}
+
+h2_destroy()
+{
+       simple_if_fini $h2 192.0.2.2/24 2001:db8:1::2/64
+}
+
+switch_create()
+{
+       ip link add dev br0 type bridge mcast_snooping 1 mcast_querier 1
+
+       ip link set dev $swp1 master br0
+       ip link set dev $swp2 master br0
+
+       ip link set dev br0 up
+       ip link set dev $swp1 up
+       ip link set dev $swp2 up
+}
+
+switch_destroy()
+{
+       ip link set dev $swp2 down
+       ip link set dev $swp1 down
+
+       ip link del dev br0
+}
+
+setup_prepare()
+{
+       h1=${NETIFS[p1]}
+       swp1=${NETIFS[p2]}
+
+       swp2=${NETIFS[p3]}
+       h2=${NETIFS[p4]}
+
+       vrf_prepare
+
+       h1_create
+       h2_create
+
+       switch_create
+}
+
+cleanup()
+{
+       pre_cleanup
+
+       switch_destroy
+
+       # Always cleanup the mcast group
+       ip address del dev $h2 $TEST_GROUP/32 >/dev/null 2>&1
+
+       h2_destroy
+       h1_destroy
+
+       vrf_cleanup
+}
+
+# return 0 if the packet wasn't seen on host2_if or 1 if it was
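+# e.g. "mcast_packet_test $TEST_GROUP_MAC $TEST_GROUP $h1 $h2" returns 1 when
+# the test packet sent from $h1 was observed on $h2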
+mcast_packet_test()
+{
+       local mac=$1
+       local ip=$2
+       local host1_if=$3
+       local host2_if=$4
+       local seen=0
+
+       # Add an ACL on `host2_if` which will tell us whether the packet
+       # was received by it or not.
+       tc qdisc add dev $host2_if ingress
+       tc filter add dev $host2_if ingress protocol ip pref 1 handle 101 \
+               flower dst_mac $mac action drop
+
+       $MZ $host1_if -c 1 -p 64 -b $mac -B $ip -t udp "dp=4096,sp=2048" -q
+       sleep 1
+
+       tc -j -s filter show dev $host2_if ingress \
+               | jq -e ".[] | select(.options.handle == 101) \
+               | select(.options.actions[0].stats.packets == 1)" &> /dev/null
+       if [[ $? -eq 0 ]]; then
+               seen=1
+       fi
+
+       tc filter del dev $host2_if ingress protocol ip pref 1 handle 101 flower
+       tc qdisc del dev $host2_if ingress
+
+       return $seen
+}
+
+reportleave_test()
+{
+       RET=0
+       ip address add dev $h2 $TEST_GROUP/32 autojoin
+       check_err $? "Could not join $TEST_GROUP"
+
+       sleep 5
+       bridge mdb show dev br0 | grep $TEST_GROUP 1>/dev/null
+       check_err $? "Report didn't create mdb entry for $TEST_GROUP"
+
+       mcast_packet_test $TEST_GROUP_MAC $TEST_GROUP $h1 $h2
+       check_fail $? "Traffic to $TEST_GROUP wasn't forwarded"
+
+       log_test "IGMP report $TEST_GROUP"
+
+       RET=0
+       bridge mdb show dev br0 | grep $TEST_GROUP 1>/dev/null
+       check_err $? "mdb entry for $TEST_GROUP is missing"
+
+       ip address del dev $h2 $TEST_GROUP/32
+       check_err $? "Could not leave $TEST_GROUP"
+
+       sleep 5
+       bridge mdb show dev br0 | grep $TEST_GROUP 1>/dev/null
+       check_fail $? "Leave didn't delete mdb entry for $TEST_GROUP"
+
+       mcast_packet_test $TEST_GROUP_MAC $TEST_GROUP $h1 $h2
+       check_err $? "Traffic to $TEST_GROUP was forwarded without mdb entry"
+
+       log_test "IGMP leave $TEST_GROUP"
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
index 912b2dc50be3d8ee88f51a41e8c440524ecddc4a..524b15dabb3cc257c08a7bacf5150e22f2c50640 100755 (executable)
 # Kselftest framework requirement - SKIP code is 4.
 ksft_skip=4
 
+PAUSE_ON_FAIL=no
+VERBOSE=0
+TRACING=0
+
 # Some systems don't have a ping6 binary anymore
 which ping6 > /dev/null 2>&1 && ping6=$(which ping6) || ping6=$(which ping)
 
@@ -222,6 +226,23 @@ err_flush() {
        err_buf=
 }
 
+run_cmd() {
+       cmd="$*"
+
+       if [ "$VERBOSE" = "1" ]; then
+               printf "    COMMAND: $cmd\n"
+       fi
+
+       out="$($cmd 2>&1)"
+       rc=$?
+       if [ "$VERBOSE" = "1" -a -n "$out" ]; then
+               echo "    $out"
+               echo
+       fi
+
+       return $rc
+}
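+# Usage sketch: 'run_cmd ${ns_a} ip route show' echoes the command and its
+# output when -v is given, and still returns the command's exit status.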
+
 # Find the auto-generated name for this namespace
 nsname() {
        eval echo \$NS_$1
@@ -258,22 +279,22 @@ setup_fou_or_gue() {
                fi
        fi
 
-       ${ns_a} ip fou add port 5555 ipproto ${ipproto} || return 2
-       ${ns_a} ip link add ${encap}_a type ${type} ${mode} local ${a_addr} remote ${b_addr} encap ${encap} encap-sport auto encap-dport 5556 || return 2
+       run_cmd ${ns_a} ip fou add port 5555 ipproto ${ipproto} || return 2
+       run_cmd ${ns_a} ip link add ${encap}_a type ${type} ${mode} local ${a_addr} remote ${b_addr} encap ${encap} encap-sport auto encap-dport 5556 || return 2
 
-       ${ns_b} ip fou add port 5556 ipproto ${ipproto}
-       ${ns_b} ip link add ${encap}_b type ${type} ${mode} local ${b_addr} remote ${a_addr} encap ${encap} encap-sport auto encap-dport 5555
+       run_cmd ${ns_b} ip fou add port 5556 ipproto ${ipproto}
+       run_cmd ${ns_b} ip link add ${encap}_b type ${type} ${mode} local ${b_addr} remote ${a_addr} encap ${encap} encap-sport auto encap-dport 5555
 
        if [ "${inner}" = "4" ]; then
-               ${ns_a} ip addr add ${tunnel4_a_addr}/${tunnel4_mask} dev ${encap}_a
-               ${ns_b} ip addr add ${tunnel4_b_addr}/${tunnel4_mask} dev ${encap}_b
+               run_cmd ${ns_a} ip addr add ${tunnel4_a_addr}/${tunnel4_mask} dev ${encap}_a
+               run_cmd ${ns_b} ip addr add ${tunnel4_b_addr}/${tunnel4_mask} dev ${encap}_b
        else
-               ${ns_a} ip addr add ${tunnel6_a_addr}/${tunnel6_mask} dev ${encap}_a
-               ${ns_b} ip addr add ${tunnel6_b_addr}/${tunnel6_mask} dev ${encap}_b
+               run_cmd ${ns_a} ip addr add ${tunnel6_a_addr}/${tunnel6_mask} dev ${encap}_a
+               run_cmd ${ns_b} ip addr add ${tunnel6_b_addr}/${tunnel6_mask} dev ${encap}_b
        fi
 
-       ${ns_a} ip link set ${encap}_a up
-       ${ns_b} ip link set ${encap}_b up
+       run_cmd ${ns_a} ip link set ${encap}_a up
+       run_cmd ${ns_b} ip link set ${encap}_b up
 }
 
 setup_fou44() {
@@ -319,17 +340,17 @@ setup_namespaces() {
 }
 
 setup_veth() {
-       ${ns_a} ip link add veth_a type veth peer name veth_b || return 1
-       ${ns_a} ip link set veth_b netns ${NS_B}
+       run_cmd ${ns_a} ip link add veth_a type veth peer name veth_b || return 1
+       run_cmd ${ns_a} ip link set veth_b netns ${NS_B}
 
-       ${ns_a} ip addr add ${veth4_a_addr}/${veth4_mask} dev veth_a
-       ${ns_b} ip addr add ${veth4_b_addr}/${veth4_mask} dev veth_b
+       run_cmd ${ns_a} ip addr add ${veth4_a_addr}/${veth4_mask} dev veth_a
+       run_cmd ${ns_b} ip addr add ${veth4_b_addr}/${veth4_mask} dev veth_b
 
-       ${ns_a} ip addr add ${veth6_a_addr}/${veth6_mask} dev veth_a
-       ${ns_b} ip addr add ${veth6_b_addr}/${veth6_mask} dev veth_b
+       run_cmd ${ns_a} ip addr add ${veth6_a_addr}/${veth6_mask} dev veth_a
+       run_cmd ${ns_b} ip addr add ${veth6_b_addr}/${veth6_mask} dev veth_b
 
-       ${ns_a} ip link set veth_a up
-       ${ns_b} ip link set veth_b up
+       run_cmd ${ns_a} ip link set veth_a up
+       run_cmd ${ns_b} ip link set veth_b up
 }
 
 setup_vti() {
@@ -342,14 +363,14 @@ setup_vti() {
 
        [ ${proto} -eq 6 ] && vti_type="vti6" || vti_type="vti"
 
-       ${ns_a} ip link add vti${proto}_a type ${vti_type} local ${veth_a_addr} remote ${veth_b_addr} key 10 || return 1
-       ${ns_b} ip link add vti${proto}_b type ${vti_type} local ${veth_b_addr} remote ${veth_a_addr} key 10
+       run_cmd ${ns_a} ip link add vti${proto}_a type ${vti_type} local ${veth_a_addr} remote ${veth_b_addr} key 10 || return 1
+       run_cmd ${ns_b} ip link add vti${proto}_b type ${vti_type} local ${veth_b_addr} remote ${veth_a_addr} key 10
 
-       ${ns_a} ip addr add ${vti_a_addr}/${vti_mask} dev vti${proto}_a
-       ${ns_b} ip addr add ${vti_b_addr}/${vti_mask} dev vti${proto}_b
+       run_cmd ${ns_a} ip addr add ${vti_a_addr}/${vti_mask} dev vti${proto}_a
+       run_cmd ${ns_b} ip addr add ${vti_b_addr}/${vti_mask} dev vti${proto}_b
 
-       ${ns_a} ip link set vti${proto}_a up
-       ${ns_b} ip link set vti${proto}_b up
+       run_cmd ${ns_a} ip link set vti${proto}_a up
+       run_cmd ${ns_b} ip link set vti${proto}_b up
 }
 
 setup_vti4() {
@@ -375,17 +396,17 @@ setup_vxlan_or_geneve() {
                opts_b=""
        fi
 
-       ${ns_a} ip link add ${type}_a type ${type} id 1 ${opts_a} remote ${b_addr} ${opts} || return 1
-       ${ns_b} ip link add ${type}_b type ${type} id 1 ${opts_b} remote ${a_addr} ${opts}
+       run_cmd ${ns_a} ip link add ${type}_a type ${type} id 1 ${opts_a} remote ${b_addr} ${opts} || return 1
+       run_cmd ${ns_b} ip link add ${type}_b type ${type} id 1 ${opts_b} remote ${a_addr} ${opts}
 
-       ${ns_a} ip addr add ${tunnel4_a_addr}/${tunnel4_mask} dev ${type}_a
-       ${ns_b} ip addr add ${tunnel4_b_addr}/${tunnel4_mask} dev ${type}_b
+       run_cmd ${ns_a} ip addr add ${tunnel4_a_addr}/${tunnel4_mask} dev ${type}_a
+       run_cmd ${ns_b} ip addr add ${tunnel4_b_addr}/${tunnel4_mask} dev ${type}_b
 
-       ${ns_a} ip addr add ${tunnel6_a_addr}/${tunnel6_mask} dev ${type}_a
-       ${ns_b} ip addr add ${tunnel6_b_addr}/${tunnel6_mask} dev ${type}_b
+       run_cmd ${ns_a} ip addr add ${tunnel6_a_addr}/${tunnel6_mask} dev ${type}_a
+       run_cmd ${ns_b} ip addr add ${tunnel6_b_addr}/${tunnel6_mask} dev ${type}_b
 
-       ${ns_a} ip link set ${type}_a up
-       ${ns_b} ip link set ${type}_b up
+       run_cmd ${ns_a} ip link set ${type}_a up
+       run_cmd ${ns_b} ip link set ${type}_b up
 }
 
 setup_geneve4() {
@@ -409,15 +430,15 @@ setup_xfrm() {
        veth_a_addr="${2}"
        veth_b_addr="${3}"
 
-       ${ns_a} ip -${proto} xfrm state add src ${veth_a_addr} dst ${veth_b_addr} spi 0x1000 proto esp aead "rfc4106(gcm(aes))" 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel || return 1
-       ${ns_a} ip -${proto} xfrm state add src ${veth_b_addr} dst ${veth_a_addr} spi 0x1001 proto esp aead "rfc4106(gcm(aes))" 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel
-       ${ns_a} ip -${proto} xfrm policy add dir out mark 10 tmpl src ${veth_a_addr} dst ${veth_b_addr} proto esp mode tunnel
-       ${ns_a} ip -${proto} xfrm policy add dir in mark 10 tmpl src ${veth_b_addr} dst ${veth_a_addr} proto esp mode tunnel
+       run_cmd "${ns_a} ip -${proto} xfrm state add src ${veth_a_addr} dst ${veth_b_addr} spi 0x1000 proto esp aead 'rfc4106(gcm(aes))' 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel" || return 1
+       run_cmd "${ns_a} ip -${proto} xfrm state add src ${veth_b_addr} dst ${veth_a_addr} spi 0x1001 proto esp aead 'rfc4106(gcm(aes))' 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel"
+       run_cmd "${ns_a} ip -${proto} xfrm policy add dir out mark 10 tmpl src ${veth_a_addr} dst ${veth_b_addr} proto esp mode tunnel"
+       run_cmd "${ns_a} ip -${proto} xfrm policy add dir in mark 10 tmpl src ${veth_b_addr} dst ${veth_a_addr} proto esp mode tunnel"
 
-       ${ns_b} ip -${proto} xfrm state add src ${veth_a_addr} dst ${veth_b_addr} spi 0x1000 proto esp aead "rfc4106(gcm(aes))" 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel
-       ${ns_b} ip -${proto} xfrm state add src ${veth_b_addr} dst ${veth_a_addr} spi 0x1001 proto esp aead "rfc4106(gcm(aes))" 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel
-       ${ns_b} ip -${proto} xfrm policy add dir out mark 10 tmpl src ${veth_b_addr} dst ${veth_a_addr} proto esp mode tunnel
-       ${ns_b} ip -${proto} xfrm policy add dir in mark 10 tmpl src ${veth_a_addr} dst ${veth_b_addr} proto esp mode tunnel
+       run_cmd "${ns_b} ip -${proto} xfrm state add src ${veth_a_addr} dst ${veth_b_addr} spi 0x1000 proto esp aead 'rfc4106(gcm(aes))' 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel"
+       run_cmd "${ns_b} ip -${proto} xfrm state add src ${veth_b_addr} dst ${veth_a_addr} spi 0x1001 proto esp aead 'rfc4106(gcm(aes))' 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel"
+       run_cmd "${ns_b} ip -${proto} xfrm policy add dir out mark 10 tmpl src ${veth_b_addr} dst ${veth_a_addr} proto esp mode tunnel"
+       run_cmd "${ns_b} ip -${proto} xfrm policy add dir in mark 10 tmpl src ${veth_a_addr} dst ${veth_b_addr} proto esp mode tunnel"
 }
 
 setup_xfrm4() {
@@ -481,7 +502,7 @@ setup() {
 }
 
 trace() {
-       [ $tracing -eq 0 ] && return
+       [ $TRACING -eq 0 ] && return
 
        for arg do
                [ "${ns_cmd}" = "" ] && ns_cmd="${arg}" && continue
@@ -597,8 +618,8 @@ test_pmtu_ipvX() {
        mtu "${ns_b}"  veth_B-R2 1500
 
        # Create route exceptions
-       ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1800 ${dst1} > /dev/null
-       ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1800 ${dst2} > /dev/null
+       run_cmd ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1800 ${dst1}
+       run_cmd ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1800 ${dst2}
 
        # Check that exceptions have been created with the correct PMTU
        pmtu_1="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst1})"
@@ -630,7 +651,7 @@ test_pmtu_ipvX() {
        # Decrease remote MTU on path via R2, get new exception
        mtu "${ns_r2}" veth_R2-B 400
        mtu "${ns_b}"  veth_B-R2 400
-       ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1400 ${dst2} > /dev/null
+       run_cmd ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1400 ${dst2}
        pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst2})"
        check_pmtu_value "lock 552" "${pmtu_2}" "exceeding MTU, with MTU < min_pmtu" || return 1
 
@@ -647,7 +668,7 @@ test_pmtu_ipvX() {
        check_pmtu_value "1500" "${pmtu_2}" "increasing local MTU" || return 1
 
        # Get new exception
-       ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1400 ${dst2} > /dev/null
+       run_cmd ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1400 ${dst2}
        pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst2})"
        check_pmtu_value "lock 552" "${pmtu_2}" "exceeding MTU, with MTU < min_pmtu" || return 1
 }
@@ -696,7 +717,7 @@ test_pmtu_ipvX_over_vxlanY_or_geneveY_exception() {
 
        mtu "${ns_a}" ${type}_a $((${ll_mtu} + 1000))
        mtu "${ns_b}" ${type}_b $((${ll_mtu} + 1000))
-       ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s $((${ll_mtu} + 500)) ${dst} > /dev/null
+       run_cmd ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s $((${ll_mtu} + 500)) ${dst}
 
        # Check that exception was created
        pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst})"
@@ -776,7 +797,7 @@ test_pmtu_ipvX_over_fouY_or_gueY() {
 
        mtu "${ns_a}" ${encap}_a $((${ll_mtu} + 1000))
        mtu "${ns_b}" ${encap}_b $((${ll_mtu} + 1000))
-       ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s $((${ll_mtu} + 500)) ${dst} > /dev/null
+       run_cmd ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s $((${ll_mtu} + 500)) ${dst}
 
        # Check that exception was created
        pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst})"
@@ -834,13 +855,13 @@ test_pmtu_vti4_exception() {
 
        # Send DF packet without exceeding link layer MTU, check that no
        # exception is created
-       ${ns_a} ping -q -M want -i 0.1 -w 1 -s ${ping_payload} ${tunnel4_b_addr} > /dev/null
+       run_cmd ${ns_a} ping -q -M want -i 0.1 -w 1 -s ${ping_payload} ${tunnel4_b_addr}
        pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel4_b_addr})"
        check_pmtu_value "" "${pmtu}" "sending packet smaller than PMTU (IP payload length ${esp_payload_rfc4106})" || return 1
 
        # Now exceed link layer MTU by one byte, check that exception is created
        # with the right PMTU value
-       ${ns_a} ping -q -M want -i 0.1 -w 1 -s $((ping_payload + 1)) ${tunnel4_b_addr} > /dev/null
+       run_cmd ${ns_a} ping -q -M want -i 0.1 -w 1 -s $((ping_payload + 1)) ${tunnel4_b_addr}
        pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel4_b_addr})"
        check_pmtu_value "${esp_payload_rfc4106}" "${pmtu}" "exceeding PMTU (IP payload length $((esp_payload_rfc4106 + 1)))"
 }
@@ -856,7 +877,7 @@ test_pmtu_vti6_exception() {
        mtu "${ns_b}" veth_b 4000
        mtu "${ns_a}" vti6_a 5000
        mtu "${ns_b}" vti6_b 5000
-       ${ns_a} ${ping6} -q -i 0.1 -w 1 -s 60000 ${tunnel6_b_addr} > /dev/null
+       run_cmd ${ns_a} ${ping6} -q -i 0.1 -w 1 -s 60000 ${tunnel6_b_addr}
 
        # Check that exception was created
        pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel6_b_addr})"
@@ -902,9 +923,9 @@ test_pmtu_vti6_default_mtu() {
 test_pmtu_vti4_link_add_mtu() {
        setup namespaces || return 2
 
-       ${ns_a} ip link add vti4_a type vti local ${veth4_a_addr} remote ${veth4_b_addr} key 10
+       run_cmd ${ns_a} ip link add vti4_a type vti local ${veth4_a_addr} remote ${veth4_b_addr} key 10
        [ $? -ne 0 ] && err "  vti not supported" && return 2
-       ${ns_a} ip link del vti4_a
+       run_cmd ${ns_a} ip link del vti4_a
 
        fail=0
 
@@ -912,7 +933,7 @@ test_pmtu_vti4_link_add_mtu() {
        max=$((65535 - 20))
        # Check invalid values first
        for v in $((min - 1)) $((max + 1)); do
-               ${ns_a} ip link add vti4_a mtu ${v} type vti local ${veth4_a_addr} remote ${veth4_b_addr} key 10 2>/dev/null
+               run_cmd ${ns_a} ip link add vti4_a mtu ${v} type vti local ${veth4_a_addr} remote ${veth4_b_addr} key 10
                # This can fail, or MTU can be adjusted to a proper value
                [ $? -ne 0 ] && continue
                mtu="$(link_get_mtu "${ns_a}" vti4_a)"
@@ -920,14 +941,14 @@ test_pmtu_vti4_link_add_mtu() {
                        err "  vti tunnel created with invalid MTU ${mtu}"
                        fail=1
                fi
-               ${ns_a} ip link del vti4_a
+               run_cmd ${ns_a} ip link del vti4_a
        done
 
        # Now check valid values
        for v in ${min} 1300 ${max}; do
-               ${ns_a} ip link add vti4_a mtu ${v} type vti local ${veth4_a_addr} remote ${veth4_b_addr} key 10
+               run_cmd ${ns_a} ip link add vti4_a mtu ${v} type vti local ${veth4_a_addr} remote ${veth4_b_addr} key 10
                mtu="$(link_get_mtu "${ns_a}" vti4_a)"
-               ${ns_a} ip link del vti4_a
+               run_cmd ${ns_a} ip link del vti4_a
                if [ "${mtu}" != "${v}" ]; then
                        err "  vti MTU ${mtu} doesn't match configured value ${v}"
                        fail=1
@@ -940,9 +961,9 @@ test_pmtu_vti4_link_add_mtu() {
 test_pmtu_vti6_link_add_mtu() {
        setup namespaces || return 2
 
-       ${ns_a} ip link add vti6_a type vti6 local ${veth6_a_addr} remote ${veth6_b_addr} key 10
+       run_cmd ${ns_a} ip link add vti6_a type vti6 local ${veth6_a_addr} remote ${veth6_b_addr} key 10
        [ $? -ne 0 ] && err "  vti6 not supported" && return 2
-       ${ns_a} ip link del vti6_a
+       run_cmd ${ns_a} ip link del vti6_a
 
        fail=0
 
@@ -950,7 +971,7 @@ test_pmtu_vti6_link_add_mtu() {
        max=$((65535 - 40))
        # Check invalid values first
        for v in $((min - 1)) $((max + 1)); do
-               ${ns_a} ip link add vti6_a mtu ${v} type vti6 local ${veth6_a_addr} remote ${veth6_b_addr} key 10 2>/dev/null
+               run_cmd ${ns_a} ip link add vti6_a mtu ${v} type vti6 local ${veth6_a_addr} remote ${veth6_b_addr} key 10
                # This can fail, or MTU can be adjusted to a proper value
                [ $? -ne 0 ] && continue
                mtu="$(link_get_mtu "${ns_a}" vti6_a)"
@@ -958,14 +979,14 @@ test_pmtu_vti6_link_add_mtu() {
                        err "  vti6 tunnel created with invalid MTU ${v}"
                        fail=1
                fi
-               ${ns_a} ip link del vti6_a
+               run_cmd ${ns_a} ip link del vti6_a
        done
 
        # Now check valid values
        for v in 68 1280 1300 $((65535 - 40)); do
-               ${ns_a} ip link add vti6_a mtu ${v} type vti6 local ${veth6_a_addr} remote ${veth6_b_addr} key 10
+               run_cmd ${ns_a} ip link add vti6_a mtu ${v} type vti6 local ${veth6_a_addr} remote ${veth6_b_addr} key 10
                mtu="$(link_get_mtu "${ns_a}" vti6_a)"
-               ${ns_a} ip link del vti6_a
+               run_cmd ${ns_a} ip link del vti6_a
                if [ "${mtu}" != "${v}" ]; then
                        err "  vti6 MTU ${mtu} doesn't match configured value ${v}"
                        fail=1
@@ -978,19 +999,19 @@ test_pmtu_vti6_link_add_mtu() {
 test_pmtu_vti6_link_change_mtu() {
        setup namespaces || return 2
 
-       ${ns_a} ip link add dummy0 mtu 1500 type dummy
+       run_cmd ${ns_a} ip link add dummy0 mtu 1500 type dummy
        [ $? -ne 0 ] && err "  dummy not supported" && return 2
-       ${ns_a} ip link add dummy1 mtu 3000 type dummy
-       ${ns_a} ip link set dummy0 up
-       ${ns_a} ip link set dummy1 up
+       run_cmd ${ns_a} ip link add dummy1 mtu 3000 type dummy
+       run_cmd ${ns_a} ip link set dummy0 up
+       run_cmd ${ns_a} ip link set dummy1 up
 
-       ${ns_a} ip addr add ${dummy6_0_addr}/${dummy6_mask} dev dummy0
-       ${ns_a} ip addr add ${dummy6_1_addr}/${dummy6_mask} dev dummy1
+       run_cmd ${ns_a} ip addr add ${dummy6_0_addr}/${dummy6_mask} dev dummy0
+       run_cmd ${ns_a} ip addr add ${dummy6_1_addr}/${dummy6_mask} dev dummy1
 
        fail=0
 
        # Create vti6 interface bound to device, passing MTU, check it
-       ${ns_a} ip link add vti6_a mtu 1300 type vti6 remote ${dummy6_0_addr} local ${dummy6_0_addr}
+       run_cmd ${ns_a} ip link add vti6_a mtu 1300 type vti6 remote ${dummy6_0_addr} local ${dummy6_0_addr}
        mtu="$(link_get_mtu "${ns_a}" vti6_a)"
        if [ ${mtu} -ne 1300 ]; then
                err "  vti6 MTU ${mtu} doesn't match configured value 1300"
@@ -999,7 +1020,7 @@ test_pmtu_vti6_link_change_mtu() {
 
        # Move to another device with different MTU, without passing MTU, check
        # MTU is adjusted
-       ${ns_a} ip link set vti6_a type vti6 remote ${dummy6_1_addr} local ${dummy6_1_addr}
+       run_cmd ${ns_a} ip link set vti6_a type vti6 remote ${dummy6_1_addr} local ${dummy6_1_addr}
        mtu="$(link_get_mtu "${ns_a}" vti6_a)"
        if [ ${mtu} -ne $((3000 - 40)) ]; then
                err "  vti MTU ${mtu} is not dummy MTU 3000 minus IPv6 header length"
@@ -1007,7 +1028,7 @@ test_pmtu_vti6_link_change_mtu() {
        fi
 
        # Move it back, passing MTU, check MTU is not overridden
-       ${ns_a} ip link set vti6_a mtu 1280 type vti6 remote ${dummy6_0_addr} local ${dummy6_0_addr}
+       run_cmd ${ns_a} ip link set vti6_a mtu 1280 type vti6 remote ${dummy6_0_addr} local ${dummy6_0_addr}
        mtu="$(link_get_mtu "${ns_a}" vti6_a)"
        if [ ${mtu} -ne 1280 ]; then
                err "  vti6 MTU ${mtu} doesn't match configured value 1280"
@@ -1052,7 +1073,7 @@ test_cleanup_vxlanX_exception() {
        # Fill exception cache for multiple CPUs (2)
        # we can always use inner IPv4 for that
        for cpu in ${cpu_list}; do
-               taskset --cpu-list ${cpu} ${ns_a} ping -q -M want -i 0.1 -w 1 -s $((${ll_mtu} + 500)) ${tunnel4_b_addr} > /dev/null
+               run_cmd taskset --cpu-list ${cpu} ${ns_a} ping -q -M want -i 0.1 -w 1 -s $((${ll_mtu} + 500)) ${tunnel4_b_addr}
        done
 
        ${ns_a} ip link del dev veth_A-R1 &
@@ -1084,29 +1105,33 @@ usage() {
        exit 1
 }
 
+################################################################################
+#
 exitcode=0
 desc=0
+
+while getopts :ptv o
+do
+       case $o in
+       p) PAUSE_ON_FAIL=yes;;
+       v) VERBOSE=1;;
+       t) if which tcpdump > /dev/null 2>&1; then
+               TRACING=1
+          else
+               echo "=== tcpdump not available, tracing disabled"
+          fi
+          ;;
+       *) usage;;
+       esac
+done
+shift $(($OPTIND-1))
+
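+# Example (hypothetical invocation): "./pmtu.sh -v pmtu_vti6_exception" runs
+# one test with verbose command logging; -t additionally captures traffic
+# with tcpdump where available.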
 IFS="  
 "
 
-tracing=0
 for arg do
-       if [ "${arg}" != "${arg#--*}" ]; then
-               opt="${arg#--}"
-               if [ "${opt}" = "trace" ]; then
-                       if which tcpdump > /dev/null 2>&1; then
-                               tracing=1
-                       else
-                               echo "=== tcpdump not available, tracing disabled"
-                       fi
-               else
-                       usage
-               fi
-       else
-               # Check first that all requested tests are available before
-               # running any
-               command -v > /dev/null "test_${arg}" || { echo "=== Test ${arg} not found"; usage; }
-       fi
+       # Check first that all requested tests are available before running any
+       command -v > /dev/null "test_${arg}" || { echo "=== Test ${arg} not found"; usage; }
 done
 
 trap cleanup EXIT
@@ -1124,6 +1149,11 @@ for t in ${tests}; do
 
        (
                unset IFS
+
+               if [ "$VERBOSE" = "1" ]; then
+                       printf "\n##########################################################################\n\n"
+               fi
+
                eval test_${name}
                ret=$?
                cleanup
@@ -1132,6 +1162,11 @@ for t in ${tests}; do
                        printf "TEST: %-60s  [ OK ]\n" "${t}"
                elif [ $ret -eq 1 ]; then
                        printf "TEST: %-60s  [FAIL]\n" "${t}"
+                       if [ "${PAUSE_ON_FAIL}" = "yes" ]; then
+                               echo
+                               echo "Pausing. Hit enter to continue"
+                               read a
+                       fi
                        err_flush
                        exit 1
                elif [ $ret -eq 2 ]; then
index c9ff2b47bd1ca3a2f70ee0683cb2b79b170c74f5..80dae72a25c77f53a0268313f92394a9fe959462 100644 (file)
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 # Makefile for netfilter selftests
 
-TEST_PROGS := nft_trans_stress.sh nft_nat.sh
+TEST_PROGS := nft_trans_stress.sh nft_nat.sh bridge_brouter.sh
 
 include ../lib.mk
diff --git a/tools/testing/selftests/netfilter/bridge_brouter.sh b/tools/testing/selftests/netfilter/bridge_brouter.sh
new file mode 100755 (executable)
index 0000000..29f3955
--- /dev/null
@@ -0,0 +1,146 @@
+#!/bin/bash
+#
+# This test is for bridge 'brouting', i.e. making some packets be routed
+# rather than bridged even though they arrive on an interface that is
+# part of a bridge.
+
+#           eth0    br0     eth0
+# setup is: ns1 <-> ns0 <-> ns2
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+ret=0
+
+ebtables -V > /dev/null 2>&1
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not run test without ebtables"
+       exit $ksft_skip
+fi
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not run test without ip tool"
+       exit $ksft_skip
+fi
+
+ip netns add ns0
+ip netns add ns1
+ip netns add ns2
+
+ip link add veth0 netns ns0 type veth peer name eth0 netns ns1
+if [ $? -ne 0 ]; then
+       echo "SKIP: Can't create veth device"
+       exit $ksft_skip
+fi
+ip link add veth1 netns ns0 type veth peer name eth0 netns ns2
+
+ip -net ns0 link set lo up
+ip -net ns0 link set veth0 up
+ip -net ns0 link set veth1 up
+
+ip -net ns0 link add br0 type bridge
+if [ $? -ne 0 ]; then
+       echo "SKIP: Can't create bridge br0"
+       exit $ksft_skip
+fi
+
+ip -net ns0 link set veth0 master br0
+ip -net ns0 link set veth1 master br0
+ip -net ns0 link set br0 up
+ip -net ns0 addr add 10.0.0.1/24 dev br0
+
+# place both in same subnet, ns1 and ns2 connected via ns0:br0
+for i in 1 2; do
+  ip -net ns$i link set lo up
+  ip -net ns$i link set eth0 up
+  ip -net ns$i addr add 10.0.0.1$i/24 dev eth0
+done
+
+test_ebtables_broute()
+{
+       local cipt
+
+       # redirect is needed so the dst mac is rewritten to the bridge itself;
+       # the ip stack won't process OTHERHOST (foreign unicast mac) packets.
+       ip netns exec ns0 ebtables -t broute -A BROUTING -p ipv4 --ip-protocol icmp -j redirect --redirect-target=DROP
+       if [ $? -ne 0 ]; then
+               echo "SKIP: Could not add ebtables broute redirect rule"
+               return $ksft_skip
+       fi
+
+       # ping ns2 from ns1; expected to fail (ip forwarding is off)
+       ip netns exec ns1 ping -q -c 1 10.0.0.12 > /dev/null 2>&1
+       if [ $? -eq 0 ]; then
+               echo "ERROR: ping works, should have failed" 1>&2
+               return 1
+       fi
+
+       # enable forwarding on both interfaces.
+       # neither needs an ip address, but the bridge needs an ip address
+       # in the same network segment as ns1 and ns2 (ns0 must be able to
+       # determine a route for the to-be-forwarded packet).
+       ip netns exec ns0 sysctl -q net.ipv4.conf.veth0.forwarding=1
+       ip netns exec ns0 sysctl -q net.ipv4.conf.veth1.forwarding=1
+
+       sleep 1
+
+       ip netns exec ns1 ping -q -c 1 10.0.0.12 > /dev/null
+       if [ $? -ne 0 ]; then
+               echo "ERROR: ping did not work, but it should (broute+forward)" 1>&2
+               return 1
+       fi
+
+       echo "PASS: ns1/ns2 connectivity with active broute rule"
+       ip netns exec ns0 ebtables -t broute -F
+
+       # ping ns2 from ns1 again; expected to work (frames are bridged)
+       ip netns exec ns1 ping -q -c 1 10.0.0.12 > /dev/null
+       if [ $? -ne 0 ]; then
+               echo "ERROR: ping did not work, but it should (bridged)" 1>&2
+               return 1
+       fi
+
+       ip netns exec ns0 ebtables -t filter -A FORWARD -p ipv4 --ip-protocol icmp -j DROP
+
+       # ping ns2 from ns1; expected to fail (DROP in bridge forward chain)
+       ip netns exec ns1 ping -q -c 1 10.0.0.12 > /dev/null 2>&1
+       if [ $? -eq 0 ]; then
+               echo "ERROR: ping works, should have failed (icmp forward drop)" 1>&2
+               return 1
+       fi
+
+       # re-activate brouter
+       ip netns exec ns0 ebtables -t broute -A BROUTING -p ipv4 --ip-protocol icmp -j redirect --redirect-target=DROP
+
+       ip netns exec ns2 ping -q -c 1 10.0.0.11 > /dev/null
+       if [ $? -ne 0 ]; then
+               echo "ERROR: ping did not work, but it should (broute+forward 2)" 1>&2
+               return 1
+       fi
+
+       echo "PASS: ns1/ns2 connectivity with active broute rule and bridge forward drop"
+       return 0
+}
+
+# test basic connectivity
+ip netns exec ns1 ping -c 1 -q 10.0.0.12 > /dev/null
+if [ $? -ne 0 ]; then
+    echo "ERROR: Could not reach ns2 from ns1" 1>&2
+    ret=1
+fi
+
+ip netns exec ns2 ping -c 1 -q 10.0.0.11 > /dev/null
+if [ $? -ne 0 ]; then
+    echo "ERROR: Could not reach ns1 from ns2" 1>&2
+    ret=1
+fi
+
+if [ $ret -eq 0 ];then
+    echo "PASS: netns connectivity: ns1 and ns2 can reach each other"
+fi
+
+test_ebtables_broute
+ret=$?
+for i in 0 1 2; do ip netns del ns$i;done
+
+exit $ret
index 8ec76681605cca08f8cad14720ada2986d74f76c..248905130d5d3050ed92f978255d0045ad202684 100755 (executable)
@@ -6,6 +6,7 @@
 # Kselftest framework requirement - SKIP code is 4.
 ksft_skip=4
 ret=0
+test_inet_nat=true
 
 nft --version > /dev/null 2>&1
 if [ $? -ne 0 ];then
@@ -141,17 +142,24 @@ reset_counters()
 
 test_local_dnat6()
 {
+       local family=$1
        local lret=0
+       local IPF=""
+
+       if [ $family = "inet" ];then
+               IPF="ip6"
+       fi
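+       # An inet-family table needs an explicit address family on dnat
+       # ("dnat ip6 to ..."); a plain ip6 table does not.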
+
 ip netns exec ns0 nft -f - <<EOF
-table ip6 nat {
+table $family nat {
        chain output {
                type nat hook output priority 0; policy accept;
-               ip6 daddr dead:1::99 dnat to dead:2::99
+               ip6 daddr dead:1::99 dnat $IPF to dead:2::99
        }
 }
 EOF
        if [ $? -ne 0 ]; then
-               echo "SKIP: Could not add add ip6 dnat hook"
+               echo "SKIP: Could not add add $family dnat hook"
                return $ksft_skip
        fi
 
@@ -201,7 +209,7 @@ EOF
                fi
        done
 
-       test $lret -eq 0 && echo "PASS: ipv6 ping to ns1 was NATted to ns2"
+       test $lret -eq 0 && echo "PASS: ipv6 ping to ns1 was $family NATted to ns2"
-       ip netns exec ns0 nft flush chain ip6 nat output
+       ip netns exec ns0 nft flush chain $family nat output
 
        return $lret
@@ -209,15 +217,32 @@ EOF
 
 test_local_dnat()
 {
+       local family=$1
        local lret=0
-ip netns exec ns0 nft -f - <<EOF
-table ip nat {
+       local IPF=""
+
+       if [ $family = "inet" ];then
+               IPF="ip"
+       fi
+
+ip netns exec ns0 nft -f - <<EOF 2>/dev/null
+table $family nat {
        chain output {
                type nat hook output priority 0; policy accept;
-               ip daddr 10.0.1.99 dnat to 10.0.2.99
+               ip daddr 10.0.1.99 dnat $IPF to 10.0.2.99
        }
 }
 EOF
+       if [ $? -ne 0 ]; then
+               if [ $family = "inet" ];then
+                       echo "SKIP: inet nat tests"
+                       test_inet_nat=false
+                       return $ksft_skip
+               fi
+               echo "SKIP: Could not add add $family dnat hook"
+               return $ksft_skip
+       fi
+
        # ping netns1, expect rewrite to netns2
        ip netns exec ns0 ping -q -c 1 10.0.1.99 > /dev/null
        if [ $? -ne 0 ]; then
@@ -264,9 +289,9 @@ EOF
                fi
        done
 
-       test $lret -eq 0 && echo "PASS: ping to ns1 was NATted to ns2"
+       test $lret -eq 0 && echo "PASS: ping to ns1 was $family NATted to ns2"
 
-       ip netns exec ns0 nft flush chain ip nat output
+       ip netns exec ns0 nft flush chain $family nat output
 
        reset_counters
        ip netns exec ns0 ping -q -c 1 10.0.1.99 > /dev/null
@@ -313,7 +338,7 @@ EOF
                fi
        done
 
-       test $lret -eq 0 && echo "PASS: ping to ns1 OK after nat output chain flush"
+       test $lret -eq 0 && echo "PASS: ping to ns1 OK after $family nat output chain flush"
 
        return $lret
 }
@@ -321,6 +346,7 @@ EOF
 
 test_masquerade6()
 {
+       local family=$1
        local lret=0
 
        ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
@@ -351,16 +377,21 @@ test_masquerade6()
 
 # add masquerading rule
 ip netns exec ns0 nft -f - <<EOF
-table ip6 nat {
+table $family nat {
        chain postrouting {
                type nat hook postrouting priority 0; policy accept;
                meta oif veth0 masquerade
        }
 }
 EOF
+       if [ $? -ne 0 ]; then
+               echo "SKIP: Could not add add $family masquerade hook"
+               return $ksft_skip
+       fi
+
        ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
        if [ $? -ne 0 ] ; then
-               echo "ERROR: cannot ping ns1 from ns2 with active ipv6 masquerading"
+               echo "ERROR: cannot ping ns1 from ns2 with active $family masquerading"
                lret=1
        fi
 
@@ -397,19 +428,20 @@ EOF
                fi
        done
 
-       ip netns exec ns0 nft flush chain ip6 nat postrouting
+       ip netns exec ns0 nft flush chain $family nat postrouting
        if [ $? -ne 0 ]; then
-               echo "ERROR: Could not flush ip6 nat postrouting" 1>&2
+               echo "ERROR: Could not flush $family nat postrouting" 1>&2
                lret=1
        fi
 
-       test $lret -eq 0 && echo "PASS: IPv6 masquerade for ns2"
+       test $lret -eq 0 && echo "PASS: $family IPv6 masquerade for ns2"
 
        return $lret
 }
 
 test_masquerade()
 {
+       local family=$1
        local lret=0
 
        ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
@@ -440,16 +472,21 @@ test_masquerade()
 
 # add masquerading rule
 ip netns exec ns0 nft -f - <<EOF
-table ip nat {
+table $family nat {
        chain postrouting {
                type nat hook postrouting priority 0; policy accept;
                meta oif veth0 masquerade
        }
 }
 EOF
+       if [ $? -ne 0 ]; then
+               echo "SKIP: Could not add add $family masquerade hook"
+               return $ksft_skip
+       fi
+
        ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
        if [ $? -ne 0 ] ; then
-               echo "ERROR: cannot ping ns1 from ns2 with active ip masquerading"
+               echo "ERROR: cannot ping ns1 from ns2 with active $family masquerading"
                lret=1
        fi
 
@@ -485,19 +522,20 @@ EOF
                fi
        done
 
-       ip netns exec ns0 nft flush chain ip nat postrouting
+       ip netns exec ns0 nft flush chain $family nat postrouting
        if [ $? -ne 0 ]; then
-               echo "ERROR: Could not flush nat postrouting" 1>&2
+               echo "ERROR: Could not flush $family nat postrouting" 1>&2
                lret=1
        fi
 
-       test $lret -eq 0 && echo "PASS: IP masquerade for ns2"
+       test $lret -eq 0 && echo "PASS: $family IP masquerade for ns2"
 
        return $lret
 }
 
 test_redirect6()
 {
+       local family=$1
        local lret=0
 
        ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
@@ -527,16 +565,21 @@ test_redirect6()
 
 # add redirect rule
 ip netns exec ns0 nft -f - <<EOF
-table ip6 nat {
+table $family nat {
        chain prerouting {
                type nat hook prerouting priority 0; policy accept;
                meta iif veth1 meta l4proto icmpv6 ip6 saddr dead:2::99 ip6 daddr dead:1::99 redirect
        }
 }
 EOF
+       if [ $? -ne 0 ]; then
+               echo "SKIP: Could not add add $family redirect hook"
+               return $ksft_skip
+       fi
+
        ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
        if [ $? -ne 0 ] ; then
-               echo "ERROR: cannot ping ns1 from ns2 with active ip6 redirect"
+               echo "ERROR: cannot ping ns1 from ns2 via ipv6 with active $family redirect"
                lret=1
        fi
 
@@ -560,19 +603,20 @@ EOF
                fi
        done
 
-       ip netns exec ns0 nft delete table ip6 nat
+       ip netns exec ns0 nft delete table $family nat
        if [ $? -ne 0 ]; then
-               echo "ERROR: Could not delete ip6 nat table" 1>&2
+               echo "ERROR: Could not delete $family nat table" 1>&2
                lret=1
        fi
 
-       test $lret -eq 0 && echo "PASS: IPv6 redirection for ns2"
+       test $lret -eq 0 && echo "PASS: $family IPv6 redirection for ns2"
 
        return $lret
 }
 
 test_redirect()
 {
+       local family=$1
        local lret=0
 
        ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
@@ -603,16 +647,21 @@ test_redirect()
 
 # add redirect rule
 ip netns exec ns0 nft -f - <<EOF
-table ip nat {
+table $family nat {
        chain prerouting {
                type nat hook prerouting priority 0; policy accept;
                meta iif veth1 ip protocol icmp ip saddr 10.0.2.99 ip daddr 10.0.1.99 redirect
        }
 }
 EOF
+       if [ $? -ne 0 ]; then
+               echo "SKIP: Could not add add $family redirect hook"
+               return $ksft_skip
+       fi
+
        ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
        if [ $? -ne 0 ] ; then
-               echo "ERROR: cannot ping ns1 from ns2 with active ip redirect"
+               echo "ERROR: cannot ping ns1 from ns2 with active $family ip redirect"
                lret=1
        fi
 
@@ -637,13 +686,13 @@ EOF
                fi
        done
 
-       ip netns exec ns0 nft delete table ip nat
+       ip netns exec ns0 nft delete table $family nat
        if [ $? -ne 0 ]; then
-               echo "ERROR: Could not delete nat table" 1>&2
+               echo "ERROR: Could not delete $family nat table" 1>&2
                lret=1
        fi
 
-       test $lret -eq 0 && echo "PASS: IP redirection for ns2"
+       test $lret -eq 0 && echo "PASS: $family IP redirection for ns2"
 
        return $lret
 }
@@ -746,16 +795,25 @@ if [ $ret -eq 0 ];then
 fi
 
 reset_counters
-test_local_dnat
-test_local_dnat6
+test_local_dnat ip
+test_local_dnat6 ip6
+reset_counters
+$test_inet_nat && test_local_dnat inet
+$test_inet_nat && test_local_dnat6 inet
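+# $test_inet_nat expands to "true" or "false", so the inet variants run only
+# when the installed nft supports NAT in inet-family tables.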
 
 reset_counters
-test_masquerade
-test_masquerade6
+test_masquerade ip
+test_masquerade6 ip6
+reset_counters
+$test_inet_nat && test_masquerade inet
+$test_inet_nat && test_masquerade6 inet
 
 reset_counters
-test_redirect
-test_redirect6
+test_redirect ip
+test_redirect6 ip6
+reset_counters
+$test_inet_nat && test_redirect inet
+$test_inet_nat && test_redirect6 inet
 
 for i in 0 1 2; do ip netns del ns$i;done
 
index 27f0acaed880e765e9829306b1cc3a28e2755cbb..ddabb160a11bacc151b9891f14f7da62846b8ed1 100644 (file)
             "$TC actions flush action sample"
         ]
     },
+    {
+        "id": "7571",
+        "name": "Add sample action with invalid rate",
+        "category": [
+            "actions",
+            "sample"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action sample",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action sample rate 0 group 1 index 2",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions get action sample index 2",
+        "matchPattern": "action order [0-9]+: sample rate 1/0 group 1.*index 2 ref",
+        "matchCount": "0",
+        "teardown": [
+            "$TC actions flush action sample"
+        ]
+    },
     {
         "id": "b6d4",
         "name": "Add sample action with mandatory arguments and invalid control action",
index 99a5ffca1088ad52cdac7842739eaefc40f99c07..e2f92cefb8d5ef06237e87ffc88cad19667afbac 100644 (file)
             "$TC qdisc del dev $DEV1 ingress"
         ]
     },
+    {
+        "id": "2638",
+        "name": "Add matchall and try to get it",
+        "category": [
+            "filter",
+            "matchall"
+        ],
+        "setup": [
+            "$TC qdisc add dev $DEV1 clsact",
+            "$TC filter add dev $DEV1 protocol all pref 1 ingress handle 0x1234 matchall action ok"
+        ],
+        "cmdUnderTest": "$TC filter get dev $DEV1 protocol all pref 1 ingress handle 0x1234 matchall",
+        "expExitCode": "0",
+        "verifyCmd": "$TC filter show dev $DEV1 ingress",
+        "matchPattern": "filter protocol all pref 1 matchall chain 0 handle 0x1234",
+        "matchCount": "1",
+        "teardown": [
+            "$TC qdisc del dev $DEV1 clsact"
+        ]
+    },
     {
         "id": "d052",
         "name": "Add 1M filters with the same action",
             "$TC qdisc del dev $DEV2 ingress",
             "/bin/rm $BATCH_FILE"
         ]
+    },
+    {
+        "id": "4cbd",
+        "name": "Try to add filter with duplicate key",
+        "category": [
+            "filter",
+            "flower"
+        ],
+        "setup": [
+            "$TC qdisc add dev $DEV2 ingress",
+            "$TC filter add dev $DEV2 protocol ip prio 1 parent ffff: flower dst_mac e4:11:22:11:4a:51 src_mac e4:11:22:11:4a:50 ip_proto tcp src_ip 1.1.1.1 dst_ip 2.2.2.2 action drop"
+        ],
+        "cmdUnderTest": "$TC filter add dev $DEV2 protocol ip prio 1 parent ffff: flower dst_mac e4:11:22:11:4a:51 src_mac e4:11:22:11:4a:50 ip_proto tcp src_ip 1.1.1.1 dst_ip 2.2.2.2 action drop",
+        "expExitCode": "2",
+        "verifyCmd": "$TC -s filter show dev $DEV2 ingress",
+        "matchPattern": "filter protocol ip pref 1 flower chain 0 handle",
+        "matchCount": "1",
+        "teardown": [
+            "$TC qdisc del dev $DEV2 ingress"
+        ]
     }
 ]
index 40ea95ce2eadacf07f0dd8b197f94e91a9138b64..828c185846248031ff598670d393e34a38fa24df 100644 (file)
@@ -22,6 +22,7 @@ TPM2_CC_UNSEAL = 0x015E
 TPM2_CC_FLUSH_CONTEXT = 0x0165
 TPM2_CC_START_AUTH_SESSION = 0x0176
 TPM2_CC_GET_CAPABILITY = 0x017A
+TPM2_CC_GET_RANDOM = 0x017B
 TPM2_CC_PCR_READ = 0x017E
 TPM2_CC_POLICY_PCR = 0x017F
 TPM2_CC_PCR_EXTEND = 0x0182
@@ -357,9 +358,9 @@ class Client:
         self.flags = flags
 
         if (self.flags & Client.FLAG_SPACE) == 0:
-            self.tpm = open('/dev/tpm0', 'r+b')
+            self.tpm = open('/dev/tpm0', 'r+b', buffering=0)
         else:
-            self.tpm = open('/dev/tpmrm0', 'r+b')
+            self.tpm = open('/dev/tpmrm0', 'r+b', buffering=0)
 
     def close(self):
         self.tpm.close()
index 3bb066fea4a01171f31f80df653c3a046c35acdd..d4973be53493226b19dcca5e7140e997ea964e8b 100644 (file)
@@ -158,6 +158,69 @@ class SmokeTest(unittest.TestCase):
             pass
         self.assertEqual(rejected, True)
 
+    def test_read_partial_resp(self):
+        try:
+            fmt = '>HIIH'
+            cmd = struct.pack(fmt,
+                              tpm2.TPM2_ST_NO_SESSIONS,
+                              struct.calcsize(fmt),
+                              tpm2.TPM2_CC_GET_RANDOM,
+                              0x20)
+            self.client.tpm.write(cmd)
+            hdr = self.client.tpm.read(10)
+            sz = struct.unpack('>I', hdr[2:6])[0]
+            rsp = self.client.tpm.read()
+        except:
+            pass
+        self.assertEqual(sz, 10 + 2 + 32)
+        self.assertEqual(len(rsp), 2 + 32)
+
+    def test_read_partial_overwrite(self):
+        try:
+            fmt = '>HIIH'
+            cmd = struct.pack(fmt,
+                              tpm2.TPM2_ST_NO_SESSIONS,
+                              struct.calcsize(fmt),
+                              tpm2.TPM2_CC_GET_RANDOM,
+                              0x20)
+            self.client.tpm.write(cmd)
+            # Read part of the response
+            rsp1 = self.client.tpm.read(15)
+
+            # Send a new cmd
+            self.client.tpm.write(cmd)
+
+            # Read the whole response
+            rsp2 = self.client.tpm.read()
+        except:
+            pass
+        self.assertEqual(len(rsp1), 15)
+        self.assertEqual(len(rsp2), 10 + 2 + 32)
+
+    def test_send_two_cmds(self):
+        rejected = False
+        try:
+            fmt = '>HIIH'
+            cmd = struct.pack(fmt,
+                              tpm2.TPM2_ST_NO_SESSIONS,
+                              struct.calcsize(fmt),
+                              tpm2.TPM2_CC_GET_RANDOM,
+                              0x20)
+            self.client.tpm.write(cmd)
+
+            # expect the second write to be rejected with -EBUSY
+            self.client.tpm.write(cmd)
+            rsp = self.client.tpm.read()
+
+        except IOError:
+            # read the response left pending by the first command
+            rsp = self.client.tpm.read()
+            rejected = True
+        except:
+            pass
+        self.assertEqual(rejected, True)
+
 class SpaceTest(unittest.TestCase):
     def setUp(self):
         logging.basicConfig(filename='SpaceTest.log', level=logging.DEBUG)
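
The partial-read tests above depend on TPM2 response framing: a fixed 10-byte header whose bytes 2..5 carry the total response size, big-endian, followed by the body. A rough standalone C sketch of the same GetRandom exchange (it assumes a real /dev/tpm0 and the unbuffered, partial-read behaviour this series adds; it is not the kernel's own code):

    /* Send TPM2_GetRandom(32) and parse the response header.
     * Simplified illustration: no retry or error recovery. */
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* tag=TPM2_ST_NO_SESSIONS, size=12, cc=TPM2_CC_GET_RANDOM, count=0x20 */
            uint8_t cmd[] = { 0x80, 0x01, 0x00, 0x00, 0x00, 0x0c,
                              0x00, 0x00, 0x01, 0x7b, 0x00, 0x20 };
            uint8_t hdr[10];
            int fd = open("/dev/tpm0", O_RDWR);

            if (fd < 0)
                    return 1;
            if (write(fd, cmd, sizeof(cmd)) != (ssize_t)sizeof(cmd))
                    return 1;
            if (read(fd, hdr, sizeof(hdr)) != (ssize_t)sizeof(hdr))
                    return 1;

            /* bytes 2..5 hold the total size; the body follows the header */
            uint32_t total = (uint32_t)hdr[2] << 24 | hdr[3] << 16 |
                             hdr[4] << 8 | hdr[5];
            printf("response: %u bytes total, %u after the header\n",
                   total, total - 10);
            close(fd);
            return 0;
    }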
index 264d92da32403810ea316912f9cba05eea040215..370bd6c5e6cb3e0e2a88bd985c38fd77a6277812 100644 (file)
@@ -222,7 +222,7 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
                }
        }
 
-       if (used_lrs) {
+       if (used_lrs || cpu_if->its_vpe.its_vm) {
                int i;
                u32 elrsr;
 
@@ -247,7 +247,7 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
        u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
        int i;
 
-       if (used_lrs) {
+       if (used_lrs || cpu_if->its_vpe.its_vm) {
                write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
 
                for (i = 0; i < used_lrs; i++)
index ffd7acdceac7397d126bfb70f010981c1489ae6c..27c958306449f7533c6cfa4aa878ca8d4c995e48 100644 (file)
@@ -102,8 +102,7 @@ static bool kvm_is_device_pfn(unsigned long pfn)
  * @addr:      IPA
  * @pmd:       pmd pointer for IPA
  *
- * Function clears a PMD entry, flushes addr 1st and 2nd stage TLBs. Marks all
- * pages in the range dirty.
+ * Function clears a PMD entry, flushes addr 1st and 2nd stage TLBs.
  */
 static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd)
 {
@@ -121,8 +120,7 @@ static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd)
  * @addr:      IPA
  * @pud:       pud pointer for IPA
  *
- * Function clears a PUD entry, flushes addr 1st and 2nd stage TLBs. Marks all
- * pages in the range dirty.
+ * Function clears a PUD entry, flushes addr 1st and 2nd stage TLBs.
  */
 static void stage2_dissolve_pud(struct kvm *kvm, phys_addr_t addr, pud_t *pudp)
 {
@@ -899,9 +897,8 @@ int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
  * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
  * @kvm:       The KVM struct pointer for the VM.
  *
- * Allocates only the stage-2 HW PGD level table(s) (can support either full
- * 40-bit input addresses or limited to 32-bit input addresses). Clears the
- * allocated pages.
+ * Allocates only the stage-2 HW PGD level table(s) of size defined by
+ * stage2_pgd_size(kvm).
  *
  * Note we don't need locking here as this is only called when the VM is
  * created, which can only be done once.
@@ -1067,25 +1064,43 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
 {
        pmd_t *pmd, old_pmd;
 
+retry:
        pmd = stage2_get_pmd(kvm, cache, addr);
        VM_BUG_ON(!pmd);
 
        old_pmd = *pmd;
+       /*
+        * Multiple vcpus faulting on the same PMD entry can
+        * lead to them sequentially updating the PMD with the
+        * same value. Following the break-before-make
+        * (pmd_clear() followed by tlb_flush()) process can
+        * hinder forward progress due to refaults generated
+        * on missing translations.
+        *
+        * Skip updating the page table if the entry is
+        * unchanged.
+        */
+       if (pmd_val(old_pmd) == pmd_val(*new_pmd))
+               return 0;
+
        if (pmd_present(old_pmd)) {
                /*
-                * Multiple vcpus faulting on the same PMD entry, can
-                * lead to them sequentially updating the PMD with the
-                * same value. Following the break-before-make
-                * (pmd_clear() followed by tlb_flush()) process can
-                * hinder forward progress due to refaults generated
-                * on missing translations.
+                * If we already have PTE level mapping for this block,
+                * we must unmap it to avoid inconsistent TLB state and
+                * leaking the table page. We could end up in this situation
+                * if the memory slot was marked for dirty logging and was
+                * reverted, leaving PTE level mappings for the pages accessed
+                * during the period. So, unmap the PTE level mapping for this
+                * block and retry, as we could have released the upper level
+                * table in the process.
                 *
-                * Skip updating the page table if the entry is
-                * unchanged.
+                * Normal THP split/merge follows mmu_notifier callbacks and
+                * gets handled accordingly.
                 */
-               if (pmd_val(old_pmd) == pmd_val(*new_pmd))
-                       return 0;
-
+               if (!pmd_thp_or_huge(old_pmd)) {
+                       unmap_stage2_range(kvm, addr & S2_PMD_MASK, S2_PMD_SIZE);
+                       goto retry;
+               }
                /*
                 * Mapping in huge pages should only happen through a
                 * fault.  If a page is merged into a transparent huge
@@ -1097,8 +1112,7 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
                 * should become splitting first, unmapped, merged,
                 * and mapped back in on-demand.
                 */
-               VM_BUG_ON(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd));
-
+               WARN_ON_ONCE(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd));
                pmd_clear(pmd);
                kvm_tlb_flush_vmid_ipa(kvm, addr);
        } else {
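
The comment block above describes the break-before-make discipline: an active translation entry must be invalidated and the TLB flushed before a different value is installed, and a rewrite with an identical value should be skipped entirely. A minimal standalone C model of that sequence (all names are illustrative stand-ins, not kernel interfaces):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t entry_t;

    static void tlb_flush(void) { puts("tlb flushed"); }

    /* Returns 0 if the entry was already up to date, 1 if rewritten. */
    static int set_huge_entry(entry_t *slot, entry_t val)
    {
            entry_t old = *slot;

            /* Skip the no-op case: redoing clear+flush for the same
             * value only generates spurious refaults. */
            if (old == val)
                    return 0;

            if (old != 0) {
                    *slot = 0;      /* break: no window with two live maps */
                    tlb_flush();
            }

            *slot = val;            /* make: install the new translation */
            return 1;
    }

    int main(void)
    {
            entry_t pmd = 0;
            set_huge_entry(&pmd, 0x200000);  /* initial map */
            set_huge_entry(&pmd, 0x200000);  /* unchanged, skipped */
            set_huge_entry(&pmd, 0x400000);  /* break-before-make */
            return 0;
    }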
@@ -1114,6 +1128,7 @@ static int stage2_set_pud_huge(struct kvm *kvm, struct kvm_mmu_memory_cache *cac
 {
        pud_t *pudp, old_pud;
 
+retry:
        pudp = stage2_get_pud(kvm, cache, addr);
        VM_BUG_ON(!pudp);
 
@@ -1121,14 +1136,23 @@ static int stage2_set_pud_huge(struct kvm *kvm, struct kvm_mmu_memory_cache *cac
 
        /*
        * A large number of vcpus faulting on the same stage 2 entry
-        * can lead to a refault due to the
-        * stage2_pud_clear()/tlb_flush(). Skip updating the page
-        * tables if there is no change.
+        * can lead to a refault due to the stage2_pud_clear()/tlb_flush().
+        * Skip updating the page tables if there is no change.
         */
        if (pud_val(old_pud) == pud_val(*new_pudp))
                return 0;
 
        if (stage2_pud_present(kvm, old_pud)) {
+               /*
+                * If we already have table level mapping for this block, unmap
+                * the range for this block and retry.
+                */
+               if (!stage2_pud_huge(kvm, old_pud)) {
+                       unmap_stage2_range(kvm, addr & S2_PUD_MASK, S2_PUD_SIZE);
+                       goto retry;
+               }
+
+               WARN_ON_ONCE(kvm_pud_pfn(old_pud) != kvm_pud_pfn(*new_pudp));
                stage2_pud_clear(kvm, pudp);
                kvm_tlb_flush_vmid_ipa(kvm, addr);
        } else {
@@ -1451,13 +1475,11 @@ static void stage2_wp_pmds(struct kvm *kvm, pud_t *pud,
 }
 
 /**
-  * stage2_wp_puds - write protect PGD range
-  * @pgd:      pointer to pgd entry
-  * @addr:     range start address
-  * @end:      range end address
-  *
-  * Process PUD entries, for a huge PUD we cause a panic.
-  */
+ * stage2_wp_puds - write protect PGD range
+ * @pgd:       pointer to pgd entry
+ * @addr:      range start address
+ * @end:       range end address
+ */
 static void  stage2_wp_puds(struct kvm *kvm, pgd_t *pgd,
                            phys_addr_t addr, phys_addr_t end)
 {
@@ -1594,8 +1616,9 @@ static void kvm_send_hwpoison_signal(unsigned long address,
        send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current);
 }
 
-static bool fault_supports_stage2_pmd_mappings(struct kvm_memory_slot *memslot,
-                                              unsigned long hva)
+static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
+                                              unsigned long hva,
+                                              unsigned long map_size)
 {
        gpa_t gpa_start;
        hva_t uaddr_start, uaddr_end;
@@ -1610,34 +1633,34 @@ static bool fault_supports_stage2_pmd_mappings(struct kvm_memory_slot *memslot,
 
        /*
         * Pages belonging to memslots that don't have the same alignment
-        * within a PMD for userspace and IPA cannot be mapped with stage-2
-        * PMD entries, because we'll end up mapping the wrong pages.
+        * within a PMD/PUD for userspace and IPA cannot be mapped with stage-2
+        * PMD/PUD entries, because we'll end up mapping the wrong pages.
         *
         * Consider a layout like the following:
         *
         *    memslot->userspace_addr:
         *    +-----+--------------------+--------------------+---+
-        *    |abcde|fgh  Stage-1 PMD    |    Stage-1 PMD   tv|xyz|
+        *    |abcde|fgh  Stage-1 block  |    Stage-1 block tv|xyz|
         *    +-----+--------------------+--------------------+---+
         *
         *    memslot->base_gfn << PAGE_SIZE:
         *      +---+--------------------+--------------------+-----+
-        *      |abc|def  Stage-2 PMD    |    Stage-2 PMD     |tvxyz|
+        *      |abc|def  Stage-2 block  |    Stage-2 block   |tvxyz|
         *      +---+--------------------+--------------------+-----+
         *
-        * If we create those stage-2 PMDs, we'll end up with this incorrect
+        * If we create those stage-2 blocks, we'll end up with this incorrect
         * mapping:
         *   d -> f
         *   e -> g
         *   f -> h
         */
-       if ((gpa_start & ~S2_PMD_MASK) != (uaddr_start & ~S2_PMD_MASK))
+       if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1)))
                return false;
 
        /*
         * Next, let's make sure we're not trying to map anything not covered
-        * by the memslot. This means we have to prohibit PMD size mappings
-        * for the beginning and end of a non-PMD aligned and non-PMD sized
+        * by the memslot. This means we have to prohibit block size mappings
+        * for the beginning and end of a non-block aligned and non-block sized
         * memory slot (illustrated by the head and tail parts of the
         * userspace view above containing pages 'abcde' and 'xyz',
         * respectively).
@@ -1646,8 +1669,8 @@ static bool fault_supports_stage2_pmd_mappings(struct kvm_memory_slot *memslot,
         * userspace_addr or the base_gfn, as both are equally aligned (per
         * the check above) and equally sized.
         */
-       return (hva & S2_PMD_MASK) >= uaddr_start &&
-              (hva & S2_PMD_MASK) + S2_PMD_SIZE <= uaddr_end;
+       return (hva & ~(map_size - 1)) >= uaddr_start &&
+              (hva & ~(map_size - 1)) + map_size <= uaddr_end;
 }
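
The generalized check above reduces to: a stage-2 block of map_size bytes is only usable when the IPA and the userspace address share the same offset within a block, and the whole block fits in the memslot. A quick standalone C check of the mask arithmetic (the addresses are made up for the demo):

    #include <stdint.h>
    #include <stdio.h>

    static int same_block_offset(uint64_t gpa, uint64_t hva, uint64_t map_size)
    {
            return (gpa & (map_size - 1)) == (hva & (map_size - 1));
    }

    int main(void)
    {
            uint64_t two_mb = 2ULL << 20;

            /* both 0x1000 into their 2MB block: huge mapping is safe */
            printf("%d\n", same_block_offset(0x80001000ULL, 0x7f0001000ULL, two_mb));

            /* offsets differ: a 2MB block would map the wrong pages */
            printf("%d\n", same_block_offset(0x80001000ULL, 0x7f0002000ULL, two_mb));
            return 0;
    }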
 
 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
@@ -1676,12 +1699,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                return -EFAULT;
        }
 
-       if (!fault_supports_stage2_pmd_mappings(memslot, hva))
-               force_pte = true;
-
-       if (logging_active)
-               force_pte = true;
-
        /* Let's check if we will get back a huge page backed by hugetlbfs */
        down_read(&current->mm->mmap_sem);
        vma = find_vma_intersection(current->mm, hva, hva + 1);
@@ -1692,6 +1709,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        }
 
        vma_pagesize = vma_kernel_pagesize(vma);
+       if (logging_active ||
+           !fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) {
+               force_pte = true;
+               vma_pagesize = PAGE_SIZE;
+       }
+
        /*
         * The stage2 tables have a minimum of 2 levels (for arm64 see
         * kvm_arm_setup_stage2()). Hence, we are guaranteed that we can
@@ -1699,11 +1722,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
         * As for PUD huge maps, we must make sure that we have at least
         * 3 levels, i.e, PMD is not folded.
         */
-       if ((vma_pagesize == PMD_SIZE ||
-            (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm))) &&
-           !force_pte) {
+       if (vma_pagesize == PMD_SIZE ||
+           (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm)))
                gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
-       }
        up_read(&current->mm->mmap_sem);
 
        /* We need minimum second+third level pages */
index ab3f47745d9caaedf263577c4a7263d6386f70b5..44ceaccb18cff19655c2668fcc2f5a2f482af34a 100644 (file)
@@ -754,8 +754,9 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
        u64 indirect_ptr, type = GITS_BASER_TYPE(baser);
        phys_addr_t base = GITS_BASER_ADDR_48_to_52(baser);
        int esz = GITS_BASER_ENTRY_SIZE(baser);
-       int index;
+       int index, idx;
        gfn_t gfn;
+       bool ret;
 
        switch (type) {
        case GITS_BASER_TYPE_DEVICE:
@@ -782,7 +783,8 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
 
                if (eaddr)
                        *eaddr = addr;
-               return kvm_is_visible_gfn(its->dev->kvm, gfn);
+
+               goto out;
        }
 
        /* calculate and check the index into the 1st level */
@@ -812,7 +814,12 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
 
        if (eaddr)
                *eaddr = indirect_ptr;
-       return kvm_is_visible_gfn(its->dev->kvm, gfn);
+
+out:
+       idx = srcu_read_lock(&its->dev->kvm->srcu);
+       ret = kvm_is_visible_gfn(its->dev->kvm, gfn);
+       srcu_read_unlock(&its->dev->kvm->srcu, idx);
+       return ret;
 }
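
Both exits now funnel through one path that wraps kvm_is_visible_gfn() in an SRCU read-side section, since KVM's memslots may be replaced concurrently and must only be consulted under that lock. As a loose userspace analogy of the pattern (a pthread rwlock standing in for SRCU; all names are made up):

    /* Build with: cc demo.c -lpthread */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_rwlock_t slots_lock = PTHREAD_RWLOCK_INITIALIZER;
    static bool visible[16];                /* stand-in for the memslot array */

    static bool is_visible(unsigned int gfn)
    {
            bool ret;

            pthread_rwlock_rdlock(&slots_lock);   /* srcu_read_lock() analogue */
            ret = gfn < 16 && visible[gfn];
            pthread_rwlock_unlock(&slots_lock);   /* srcu_read_unlock() analogue */
            return ret;
    }

    int main(void)
    {
            visible[3] = true;
            printf("gfn 3: %d, gfn 5: %d\n", is_visible(3), is_visible(5));
            return 0;
    }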
 
 static int vgic_its_alloc_collection(struct vgic_its *its,
@@ -1729,8 +1736,8 @@ static void vgic_its_destroy(struct kvm_device *kvm_dev)
        kfree(its);
 }
 
-int vgic_its_has_attr_regs(struct kvm_device *dev,
-                          struct kvm_device_attr *attr)
+static int vgic_its_has_attr_regs(struct kvm_device *dev,
+                                 struct kvm_device_attr *attr)
 {
        const struct vgic_register_region *region;
        gpa_t offset = attr->attr;
@@ -1750,9 +1757,9 @@ int vgic_its_has_attr_regs(struct kvm_device *dev,
        return 0;
 }
 
-int vgic_its_attr_regs_access(struct kvm_device *dev,
-                             struct kvm_device_attr *attr,
-                             u64 *reg, bool is_write)
+static int vgic_its_attr_regs_access(struct kvm_device *dev,
+                                    struct kvm_device_attr *attr,
+                                    u64 *reg, bool is_write)
 {
        const struct vgic_register_region *region;
        struct vgic_its *its;
@@ -1919,7 +1926,7 @@ static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
               ((u64)ite->irq->intid << KVM_ITS_ITE_PINTID_SHIFT) |
                ite->collection->collection_id;
        val = cpu_to_le64(val);
-       return kvm_write_guest(kvm, gpa, &val, ite_esz);
+       return kvm_write_guest_lock(kvm, gpa, &val, ite_esz);
 }
 
 /**
@@ -2066,7 +2073,7 @@ static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev,
               (itt_addr_field << KVM_ITS_DTE_ITTADDR_SHIFT) |
                (dev->num_eventid_bits - 1));
        val = cpu_to_le64(val);
-       return kvm_write_guest(kvm, ptr, &val, dte_esz);
+       return kvm_write_guest_lock(kvm, ptr, &val, dte_esz);
 }
 
 /**
@@ -2246,7 +2253,7 @@ static int vgic_its_save_cte(struct vgic_its *its,
               ((u64)collection->target_addr << KVM_ITS_CTE_RDBASE_SHIFT) |
               collection->collection_id);
        val = cpu_to_le64(val);
-       return kvm_write_guest(its->dev->kvm, gpa, &val, esz);
+       return kvm_write_guest_lock(its->dev->kvm, gpa, &val, esz);
 }
 
 static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
@@ -2317,7 +2324,7 @@ static int vgic_its_save_collection_table(struct vgic_its *its)
         */
        val = 0;
        BUG_ON(cte_esz > sizeof(val));
-       ret = kvm_write_guest(its->dev->kvm, gpa, &val, cte_esz);
+       ret = kvm_write_guest_lock(its->dev->kvm, gpa, &val, cte_esz);
        return ret;
 }
 
index 408a78eb6a97b13d48ff576d81a113e0c3da49b9..9f87e58dbd4aebaae1c7688fc2251dec21bb191a 100644 (file)
@@ -358,7 +358,7 @@ int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq)
        if (status) {
                /* clear consumed data */
                val &= ~(1 << bit_nr);
-               ret = kvm_write_guest(kvm, ptr, &val, 1);
+               ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
                if (ret)
                        return ret;
        }
@@ -409,7 +409,7 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
                else
                        val &= ~(1 << bit_nr);
 
-               ret = kvm_write_guest(kvm, ptr, &val, 1);
+               ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
                if (ret)
                        return ret;
        }
index abd9c735267784a3085b797d33133a579dc629c6..3af69f2a38667308cf45e13e26540b379cbffb7c 100644 (file)
@@ -867,15 +867,21 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
         * either observe the new interrupt before or after doing this check,
         * and introducing additional synchronization mechanism doesn't change
         * this.
+        *
+        * Note that we still need to go through the whole thing if anything
+        * can be directly injected (GICv4).
         */
-       if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
+       if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head) &&
+           !vgic_supports_direct_msis(vcpu->kvm))
                return;
 
        DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
 
-       raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
-       vgic_flush_lr_state(vcpu);
-       raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
+       if (!list_empty(&vcpu->arch.vgic_cpu.ap_list_head)) {
+               raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
+               vgic_flush_lr_state(vcpu);
+               raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
+       }
 
        if (can_access_vgic_from_kernel())
                vgic_restore_state(vcpu);
index 4325250afd728447630a2decb1333965d5fd2cf8..001aeda4c154d4a9ff5b2e9f73cebdc743c2c29b 100644 (file)
@@ -214,9 +214,9 @@ irqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
 
        if (flags & EPOLLHUP) {
                /* The eventfd is closing, detach from KVM */
-               unsigned long flags;
+               unsigned long iflags;
 
-               spin_lock_irqsave(&kvm->irqfds.lock, flags);
+               spin_lock_irqsave(&kvm->irqfds.lock, iflags);
 
                /*
                 * We must check if someone deactivated the irqfd before
@@ -230,7 +230,7 @@ irqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
                if (irqfd_is_active(irqfd))
                        irqfd_deactivate(irqfd);
 
-               spin_unlock_irqrestore(&kvm->irqfds.lock, flags);
+               spin_unlock_irqrestore(&kvm->irqfds.lock, iflags);
        }
 
        return 0;
index 3547b0d8c91ea2c84e0869b769e9947829fe4286..79e59e4fa3dc6be751079e669e214b7fc614e07f 100644 (file)
@@ -144,18 +144,19 @@ static int setup_routing_entry(struct kvm *kvm,
 {
        struct kvm_kernel_irq_routing_entry *ei;
        int r;
+       u32 gsi = array_index_nospec(ue->gsi, KVM_MAX_IRQ_ROUTES);
 
        /*
         * Do not allow GSI to be mapped to the same irqchip more than once.
         * Allow only one to one mapping between GSI and non-irqchip routing.
         */
-       hlist_for_each_entry(ei, &rt->map[ue->gsi], link)
+       hlist_for_each_entry(ei, &rt->map[gsi], link)
                if (ei->type != KVM_IRQ_ROUTING_IRQCHIP ||
                    ue->type != KVM_IRQ_ROUTING_IRQCHIP ||
                    ue->u.irqchip.irqchip == ei->irqchip.irqchip)
                        return -EINVAL;
 
-       e->gsi = ue->gsi;
+       e->gsi = gsi;
        e->type = ue->type;
        r = kvm_set_routing_entry(kvm, e, ue);
        if (r)
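
array_index_nospec() clamps a user-controlled index before it is used to address a table, so a mispredicted bounds check cannot steer a speculative load out of bounds. A standalone model of the branchless clamp (illustrative only; it relies on arithmetic right shift of a signed long, as the kernel's generic fallback does):

    #include <stddef.h>
    #include <stdio.h>

    /* Returns index when index < size, 0 otherwise, without a branch. */
    static size_t index_nospec(size_t index, size_t size)
    {
            long mask = ~(long)(index | (size - 1 - index)) >> (8 * sizeof(long) - 1);
            return index & (size_t)mask;
    }

    int main(void)
    {
            printf("%zu\n", index_nospec(3, 16));   /* 3: in range, unchanged */
            printf("%zu\n", index_nospec(99, 16));  /* 0: clamped */
            return 0;
    }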
index f25aa98a94df430b6064c31e89ff1d614d8846b8..dc8edc97ba850384680b56f88063f61bebfc96c8 100644 (file)
@@ -2905,6 +2905,9 @@ static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
 {
        struct kvm_device *dev = filp->private_data;
 
+       if (dev->kvm->mm != current->mm)
+               return -EIO;
+
        switch (ioctl) {
        case KVM_SET_DEVICE_ATTR:
                return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg);
@@ -2974,12 +2977,14 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
        struct kvm_device_ops *ops = NULL;
        struct kvm_device *dev;
        bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
+       int type;
        int ret;
 
        if (cd->type >= ARRAY_SIZE(kvm_device_ops_table))
                return -ENODEV;
 
-       ops = kvm_device_ops_table[cd->type];
+       type = array_index_nospec(cd->type, ARRAY_SIZE(kvm_device_ops_table));
+       ops = kvm_device_ops_table[type];
        if (ops == NULL)
                return -ENODEV;
 
@@ -2994,7 +2999,7 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
        dev->kvm = kvm;
 
        mutex_lock(&kvm->lock);
-       ret = ops->create(dev, cd->type);
+       ret = ops->create(dev, type);
        if (ret < 0) {
                mutex_unlock(&kvm->lock);
                kfree(dev);